[Feature] add GA346 baseline version

Change-Id: Ic62933698569507dcf98240cdf5d9931ae34348f
diff --git a/src/kernel/modules/netsys_driver/nat/Makefile b/src/kernel/modules/netsys_driver/nat/Makefile
new file mode 100755
index 0000000..d6ddfe8
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/Makefile
@@ -0,0 +1,116 @@
+###############################################################################
+# Necessary Check
+
+#ifeq ($(AUTOCONF_H),)
+    #$(error AUTOCONF_H is not defined)
+#endif
+
+#ccflags-y += -imacros $(AUTOCONF_H)
+
+ifeq ($(TARGET_BUILD_VARIANT),$(filter $(TARGET_BUILD_VARIANT),userdebug user))
+    ldflags-y += -s
+endif
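+# (ld's -s flag strips symbol information from the module on user/userdebug builds)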
+
+# Force build fail on modpost warning
+KBUILD_MODPOST_FAIL_ON_WARNINGS := y
+###############################################################################
+ccflags-y += -I$(srctree)/include/
+###############################################################################
+
+ccflags-y += -Werror
+ccflags-y += -Wno-error=format
+ccflags-y += -Wno-error=format-extra-args
+
+###############################################################################
+MODULE_NAME := hwnat
+ifeq ($(CONFIG_WLAN_DRV_BUILD_IN),y)
+$(warning $(MODULE_NAME) is built into boot.img)
+obj-y += $(MODULE_NAME).o
+else
+$(warning $(MODULE_NAME) is a kernel module)
+obj-m += $(MODULE_NAME).o
+endif
+
+# Local config
+ccflags-y += -D CONFIG_RA_NAT_HW=1
+ccflags-y += -D CONFIG_RA_HW_NAT=1
+ccflags-y += -D CONFIG_RA_HW_NAT_TBL_32K=1
+ccflags-y += -D CONFIG_RA_HW_NAT_HASH1=1
+ccflags-y += -D CONFIG_HW_NAT_AUTO_MODE=1
+ccflags-y += -D CONFIG_PPE_MCAST=1
+ccflags-y += -D CONFIG_RA_HW_NAT_WIFI=1
+ccflags-y += -D CONFIG_RA_HW_NAT_WIFI_NEW_ARCH=1
+ccflags-y += -D CONFIG_ARCH_MT7622_WIFI_HW_NAT=1
+ccflags-y += -D CONFIG_PPE_MIB=1
+ccflags-y += -D CONFIG_ARCH_COLGIN=1
+ccflags-y += -D CONFIG_RA_HW_NAT_IPV6=1
+ccflags-y += -D CONFIG_SUPPORT_OPENWRT=1
+ccflags-y += -D CONFIG_QDMA_SUPPORT_QOS=1
+ccflags-y += -D CONFIG_HW_NAT_SW_DVFS=1
+ccflags-y += -D CONFIG_HNAT_V1=1
+ccflags-y += -D CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT=1
+
+# Local config
+EXTRA_KCONFIG:= \
+	CONFIG_RA_NAT_HW=y \
+	CONFIG_RA_HW_NAT=m \
+	CONFIG_RA_HW_NAT_TBL_32K=y \
+	CONFIG_RA_HW_NAT_HASH1=y \
+	CONFIG_HW_NAT_AUTO_MODE=y \
+	CONFIG_PPE_MCAST=y \
+	CONFIG_RA_HW_NAT_WIFI=y \
+	CONFIG_RA_HW_NAT_WIFI_NEW_ARCH=y \
+	CONFIG_ARCH_MT7622_WIFI_HW_NAT=y \
+	CONFIG_PPE_MIB=y \
+	CONFIG_ARCH_COLGIN=y \
+	CONFIG_RA_HW_NAT_IPV6=y \
+	CONFIG_SUPPORT_OPENWRT=y \
+	CONFIG_QDMA_SUPPORT_QOS=y \
+	CONFIG_HW_NAT_SW_DVFS=y \
+	CONFIG_HNAT_V1=y \
+	CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT=y
+
+# Transfer local kernel config to compile option
+EXTRA_CFLAGS:= \
+	$(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=m,%,$(filter %=m,$(EXTRA_KCONFIG)))) \
+	$(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=y,%,$(filter %=y,$(EXTRA_KCONFIG)))) \
+	$(patsubst CONFIG_%, -DCONFIG_%=0, $(patsubst %=n,%,$(filter %=n,$(EXTRA_KCONFIG)))) \
+
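+# Example: with the settings above, CONFIG_RA_HW_NAT=m and CONFIG_PPE_MIB=y both
+# become -DCONFIG_RA_HW_NAT=1 / -DCONFIG_PPE_MIB=1; an entry set to n would
+# become -DCONFIG_...=0 (none are set to n here).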
+$(info $$EXTRA_CFLAGS is [${EXTRA_CFLAGS}])
+
+###############################################################################
+# common_main
+###############################################################################
+ccflags-y += -I$(src)/hw_nat
+ccflags-y += -I$(src)/include
+###############################################################################
+
+$(MODULE_NAME)-objs += hw_nat/ra_nat.o
+$(MODULE_NAME)-objs += hw_nat/hwnat_ioctl.o
+$(MODULE_NAME)-objs += hw_nat/hnat_dbg_proc.o
+$(MODULE_NAME)-objs += hw_nat/hnat_common.o
+$(MODULE_NAME)-objs += hw_nat/foe_fdb.o
+$(MODULE_NAME)-objs += hw_nat/util.o
+$(MODULE_NAME)-objs += hw_nat/mcast_tbl.o
+
+#ifeq ($(CONFIG_HW_NAT_SW_DVFS),y)
+$(MODULE_NAME)-objs += hw_nat/hnat_swdvfs.o
+#endif
+
+#$(MODULE_NAME)-objs += hw_nat/hook_base.o
+#$(MODULE_NAME)-objs += hw_nat/hook_ext.o
+
+ifeq ($(CONFIG_PINCTRL_MT7622),y)
+	EXTRA_CFLAGS += -DCONFIG_ARCH_MT7622
+endif
+
+ifeq ($(CONFIG_RALINK),y)
+	EXTRA_CFLAGS += -DCONFIG_RALINK_MT7621
+endif
+
+ifeq ($(CONFIG_SOC_MT7621),y)
+	EXTRA_CFLAGS += -DCONFIG_RALINK_MT7621
+endif
+
+EXTRA_CFLAGS += -DCONFIG_ARCH_COLGIN
diff --git a/src/kernel/modules/netsys_driver/nat/Makefile.ce b/src/kernel/modules/netsys_driver/nat/Makefile.ce
new file mode 100755
index 0000000..9858396
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/Makefile.ce
@@ -0,0 +1,32 @@
+# Makefile for netsys hwnat driver
+
+##############################################################
+# Common settings
+##############################################################
+
+
+##############################################################
+# Platform specific
+##############################################################
+
+
+##############################################################
+# Compile settings
+##############################################################
+
+all: driver
+
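+# Standard out-of-tree kbuild invocation: build inside the kernel tree
+# (LINUX_SRC) with M= pointing back at this driver directory.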
+driver:
+	+cd $(DRIVER_DIR) && $(MAKE) -C $(LINUX_SRC) M=$(DRIVER_DIR) MODULE_NAME=$(MODULE_NAME) PLATFORM_FLAGS="$(PLATFORM_FLAGS)" PLATFORM=${PLATFORM} modules
+
+
+clean: driver_clean
+
+
+driver_clean:
+	cd $(DRIVER_DIR) && $(MAKE) -C $(LINUX_SRC) M=$(DRIVER_DIR) MODULE_NAME=$(MODULE_NAME) clean
+	rm -f $(DRIVER_DIR)/$(MODULE_NAME).ko
+
+
+.PHONY: all clean driver driver_clean
+
diff --git a/src/kernel/modules/netsys_driver/nat/NOTICE b/src/kernel/modules/netsys_driver/nat/NOTICE
new file mode 100755
index 0000000..23c0594
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/NOTICE
@@ -0,0 +1,24 @@
+Copyright(C) 2017 MediaTek Inc.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
+following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
+disclaimer.
+    * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
+following disclaimer in the documentation and/or other materials provided with the distribution.
+    * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
diff --git a/src/kernel/modules/netsys_driver/nat/build-check-netsys.sh b/src/kernel/modules/netsys_driver/nat/build-check-netsys.sh
new file mode 100755
index 0000000..329ddd4
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/build-check-netsys.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+TIME=$(date +%F-%T)
+echo
+echo "######## ${TIME} ########"
+echo
+
+mkdir -p OBJ
+RESULT_PATH="OBJ/"
+
+
+# MAIN
+# clean
+make -f Makefile clean
+# make
+make -f Makefile -j8
+
+# check
+#   make return code
+#   check that the expected objects exist
+
+if [ -f "${RESULT_PATH}ra_nat.o" ]; then
+	echo "HW_NAT built successfully."
+else
+	echo "HW_NAT build FAILED (ra_nat.o not found)!"
+	exit 1
+fi
+
+if [ -f "${RESULT_PATH}hook_ext.o" ]; then
+	echo "HOOK built successfully."
+else
+	echo "HOOK build FAILED (hook_ext.o not found)!"
+	exit 1
+fi
+
+TIME=$(date +%F-%T)
+echo
+echo "######## ${TIME} ########"
+echo
diff --git a/src/kernel/modules/netsys_driver/nat/foe_hook/Makefile b/src/kernel/modules/netsys_driver/nat/foe_hook/Makefile
new file mode 100644
index 0000000..b0d41e5
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/foe_hook/Makefile
@@ -0,0 +1,5 @@
+obj-y		+= foe_hook.o
+
+foe_hook-objs	+= hook_base.o
+foe_hook-objs	+= hook_ext.o
+
diff --git a/src/kernel/modules/netsys_driver/nat/foe_hook/Makefile.6 b/src/kernel/modules/netsys_driver/nat/foe_hook/Makefile.6
new file mode 100755
index 0000000..3743bb6
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/foe_hook/Makefile.6
@@ -0,0 +1,6 @@
+obj-y		+= foe_hook.o
+
+foe_hook-objs	+= hook_ext.o
+ifeq ($(CONFIG_RA_NAT_HW),y)
+        EXTRA_CFLAGS += -DCONFIG_RA_NAT_HW
+endif
\ No newline at end of file
diff --git a/src/kernel/modules/netsys_driver/nat/foe_hook/hook_base.c b/src/kernel/modules/netsys_driver/nat/foe_hook/hook_base.c
new file mode 100644
index 0000000..2e41170
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/foe_hook/hook_base.c
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/ra_nat.h>
+#define PURPOSE "FAST_NAT_SUPPORT"
+
+int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_rx);
+
+int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_tx);
diff --git a/src/kernel/modules/netsys_driver/nat/foe_hook/hook_ext.c b/src/kernel/modules/netsys_driver/nat/foe_hook/hook_ext.c
new file mode 100644
index 0000000..33a0b6a
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/foe_hook/hook_ext.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/ra_nat.h>
+
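+/*
+ * This file exports the data and hook points shared between the kernel
+ * network path and the hw_nat module: the function pointers below stay
+ * NULL until the hw_nat module registers its handlers, so callers must
+ * NULL-check them before invoking.
+ */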
+struct net_device	*dst_port[MAX_IF_NUM];
+EXPORT_SYMBOL(dst_port);
+u8 dst_port_type[MAX_IF_NUM];
+EXPORT_SYMBOL(dst_port_type);
+
+struct foe_entry *ppe_virt_foe_base_tmp;
+EXPORT_SYMBOL(ppe_virt_foe_base_tmp);
+struct foe_entry *ppe1_virt_foe_base_tmp;
+EXPORT_SYMBOL(ppe1_virt_foe_base_tmp);
+
+int (*ppe_hook_rx_wifi)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_wifi);
+int (*ppe_hook_tx_wifi)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_wifi);
+
+int (*ppe_hook_rx_modem)(struct sk_buff *skb, u32 cpu_reason, u32 foe_entry_num) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_modem);
+int (*ppe_hook_tx_modem)(struct sk_buff *skb, u32 net_type, u32 channel_id) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_modem);
+
+int (*ppe_hook_rx_rndis)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_rndis);
+int (*ppe_hook_tx_rndis)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_rndis);
+
+
+int (*ppe_hook_rx_eth)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_eth);
+int (*ppe_hook_tx_eth)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_eth);
+
+int (*ppe_hook_rx_ext)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(ppe_hook_rx_ext);
+int (*ppe_hook_tx_ext)(struct sk_buff *skb, int gmac_no) = NULL;
+EXPORT_SYMBOL(ppe_hook_tx_ext);
+
+void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_register_hook);
+void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
+EXPORT_SYMBOL(ppe_dev_unregister_hook);
+
+int (*ppe_get_dev_stats)(struct net_device *dev, struct rtnl_link_stats64 *storage) = NULL;
+EXPORT_SYMBOL(ppe_get_dev_stats);
+
+void hwnat_magic_tag_set_zero(struct sk_buff *skb)
+{
+	if ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) ||
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) ||
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE) ||
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_RNDIS)) {
+		if (IS_SPACE_AVAILABLE_HEAD(skb))
+			FOE_MAGIC_TAG_HEAD(skb) = 0;
+	}
+	if ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) ||
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) ||
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_GE) ||
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_RNDIS)) {
+		if (IS_SPACE_AVAILABLE_TAIL(skb))
+			FOE_MAGIC_TAG_TAIL(skb) = 0;
+	}
+}
+EXPORT_SYMBOL(hwnat_magic_tag_set_zero);
+
+void hwnat_check_magic_tag(struct sk_buff *skb)
+{
+	if (IS_SPACE_AVAILABLE_HEAD(skb)) {
+		FOE_MAGIC_TAG_HEAD(skb) = 0;
+		FOE_AI_HEAD(skb) = UN_HIT;
+	}
+	if (IS_SPACE_AVAILABLE_TAIL(skb)) {
+		FOE_MAGIC_TAG_TAIL(skb) = 0;
+		FOE_AI_TAIL(skb) = UN_HIT;
+	}
+}
+EXPORT_SYMBOL(hwnat_check_magic_tag);
+
+void hwnat_set_headroom_zero(struct sk_buff *skb)
+{
+	if (skb->cloned != 1) {
+		if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb) ||
+		    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PPE)) {
+			if (IS_SPACE_AVAILABLE_HEAD(skb))
+				memset(FOE_INFO_START_ADDR_HEAD(skb), 0,
+				       FOE_INFO_LEN);
+		}
+	}
+}
+EXPORT_SYMBOL(hwnat_set_headroom_zero);
+
+void hwnat_set_tailroom_zero(struct sk_buff *skb)
+{
+	if (skb->cloned != 1) {
+		if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb) ||
+		    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PPE)) {
+			if (IS_SPACE_AVAILABLE_TAIL(skb))
+				memset(FOE_INFO_START_ADDR_TAIL(skb), 0,
+				       FOE_INFO_LEN);
+		}
+	}
+}
+EXPORT_SYMBOL(hwnat_set_tailroom_zero);
+
+void hwnat_copy_headroom(u8 *data, struct sk_buff *skb)
+{
+	memcpy(data, skb->head, FOE_INFO_LEN);
+}
+EXPORT_SYMBOL(hwnat_copy_headroom);
+
+void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb)
+{
+	memcpy((data + size - FOE_INFO_LEN),
+	       (skb_end_pointer(skb) - FOE_INFO_LEN),
+	       FOE_INFO_LEN);
+}
+EXPORT_SYMBOL(hwnat_copy_tailroom);
+
+void hwnat_setup_dma_ops(struct device *dev, bool coherent)
+{
+	arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL(hwnat_setup_dma_ops);
+
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/Kconfig b/src/kernel/modules/netsys_driver/nat/hw_nat/Kconfig
new file mode 100644
index 0000000..bb7dba5
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/Kconfig
@@ -0,0 +1,216 @@
+config RA_HW_NAT
+        tristate "Ralink HW NAT (** works in module mode ONLY **)"
+	depends on RA_NAT_HW
+	---help---
+	  This driver supports MediaTek HW NAT.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about MediaTek Ethernet devices. If you say Y,
+	  you will be asked for your specific card in the following questions.
+
+config  RA_HW_NAT_BINDING_THRESHOLD
+        int "Binding Threshold (Unit: Packets Per Second)"
+	depends on RA_HW_NAT
+	default 30
+	help
+	When the flow rate exceeds the binding threshold (in packets
+	per second), the entry state changes to the bind state.
+
+choice
+        prompt "Foe Table Size"
+	depends on RA_HW_NAT
+	default RA_HW_NAT_TBL_4K
+	help
+	Number of entries in the FoE table.
+
+config  RA_HW_NAT_TBL_1K
+        bool "1K"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_TBL_2K
+        bool "2K"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_TBL_4K
+        bool "4K"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_TBL_8K
+        bool "8K"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_TBL_16K
+        bool "16K"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_TBL_32K
+        bool "32K"
+	depends on RA_HW_NAT
+endchoice
+
+choice
+	prompt "Hash Algorithm"
+	depends on RA_HW_NAT
+	default RA_HW_NAT_HASH1
+
+config  RA_HW_NAT_HASH0
+        bool "Hash0-Simple"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_HASH1
+        bool "Hash1-Complex"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_HASH2
+        bool "Hash2-Complex"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_HASH3
+        bool "Hash3-Complex"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_HASH_DBG
+        bool "Hash-Debug"
+	depends on RA_HW_NAT
+
+endchoice
+
+choice
+        prompt "HW_NAT OP MODE"
+	depends on RA_HW_NAT && (RA_HW_NAT_HASH1 || RA_HW_NAT_HASH2)
+	default HW_NAT_AUTO_MODE
+	help
+	Say Y here if you want to select the HWNAT operation mode on
+	MT762x series SoCs. There are three operation modes to choose from.
+
+config  HW_NAT_AUTO_MODE
+        bool "AUTO_MODE"
+	help
+	Say Y here if you want to enable the HWNAT auto operation mode
+	on MT762x series SoCs.
+	In auto learn mode, the driver fills the PPE table and
+	binds entries itself.
+
+config  HW_NAT_SEMI_AUTO_MODE
+        bool "SEMI_AUTO_MODE"
+	help
+	Say Y here if you want to enable the HWNAT semi-auto operation
+	mode on MT762x series SoCs.
+	In semi-auto learn mode, the driver fills the PPE table,
+	but user space binds entries.
+
+config  HW_NAT_MANUAL_MODE
+        bool "MANUAL_MODE"
+	help
+	Say Y here if you want to enable the HWNAT manual operation mode
+	on MT762x series SoCs.
+	In manual mode, user space fills the PPE table
+	and binds entries.
+
+endchoice
+
+choice
+	prompt "Hash DBG Mode"
+	depends on RA_HW_NAT_HASH_DBG
+	default RA_HW_NAT_HASH_DBG_SPORT
+
+config  RA_HW_NAT_HASH_DBG_IPV6_SIP
+        bool "IPv6 source IP[15:0]"
+
+config  RA_HW_NAT_HASH_DBG_IPV4_SIP
+        bool "IPv4 source IP[15:0]"
+
+config  RA_HW_NAT_HASH_DBG_SPORT
+        bool "source port[15:0]"
+
+endchoice
+
+config  RA_HW_NAT_IPV6
+        bool "IPv6 Acceleration"
+	depends on RA_HW_NAT
+	default y
+	help
+	IPv6 routing accelerated by HNAT.
+
+config  PPE_MCAST
+        bool "PPE built-in multicast table support"
+	depends on RA_HW_NAT
+
+config  RA_HW_NAT_WIFI
+	bool "WiFi Acceleration"
+	depends on RA_HW_NAT
+	select RA_HW_NAT_WIFI_NEW_ARCH
+
+config  RA_HW_NAT_WIFI_NEW_ARCH
+	bool "WiFi Fast Path(WHNAT)"
+	depends on RA_HW_NAT_WIFI
+
+config  SUPPORT_WLAN_OPTIMIZE
+	bool "Wlan <-> Lan Go SW Path"
+	depends on RA_HW_NAT_WIFI
+	help
+	Say Y here if you want wlan-to-lan traffic to take the SW path
+	on MT762x series SoCs.
+	Bridged traffic gains no benefit from HWNAT acceleration,
+	so we check whether wlan and br0 are in the same subnet.
+
+config WAN_TO_WLAN_SUPPORT_QOS
+	bool "Wan ->Wlan Support QoS"
+	depends on RA_HW_NAT_WIFI
+	default n
+	help
+	Say Y here if you want HWNAT to support QoS on
+	MT762x series SoCs.
+	HWNAT lan <-> wan traffic will go through the QDMA scheduler.
+	If you say Y, short packets cannot reach wire speed.
+
+config ARCH_MT7622_WIFI_HW_NAT
+	bool "Warp Drive HW NAT"
+	depends on RA_HW_NAT_WIFI
+	help
+	Say Y here if you want to enable Warp Drive HW NAT
+	on MT762x series SoCs.
+	Ethernet-to-wlan traffic will go through WHNAT.
+	If you say Y, ethernet-to-wlan CPU loading is almost zero.
+
+config  RA_HW_NAT_PACKET_SAMPLING
+	bool "Packet Sampling to CPU"
+	depends on RA_HW_NAT && (MACH_MT7623 || MTK_EMI_7622)
+
+config RA_HW_NAT_ACCNT_MAINTAINER
+	bool "Get Statistic Counter Periodically(Accounting Group)"
+	depends on RA_HW_NAT
+	help
+	If you need 64-bit byte/packet counters and want the HNAT module
+	to fetch statistic counters periodically, enable this.
+
+config PPE_MIB
+	bool "Per Flow Mib Counter"
+	depends on RA_HW_NAT
+	help
+	If you need per-flow byte/packet counters and want the HNAT module
+	to fetch statistic counters periodically, enable this.
+
+config QDMA_SUPPORT_QOS
+	bool "Wired Ethernet Support QoS"
+	depends on RA_HW_NAT
+	default y
+	help
+	Say Y here if you want HWNAT to support QoS on
+	MT762x series SoCs.
+	HWNAT lan <-> wan traffic will go through the QDMA scheduler.
+	If you say Y, short packets cannot reach wire speed.
+
+config HW_NAT_SW_DVFS
+	bool "S/W (Timer) Based DVFS"
+	depends on RA_HW_NAT
+	default y
+	help
+	Say Y here if you would like to enable the S/W based DVFS feature.
+
+config HW_NAT_SW_DVFS_DEFAULT_OPP
+	int "S/W DVFS Default OPP"
+	depends on HW_NAT_SW_DVFS
+	range 0 3
+	default 3
+	help
+	Default OPP level used by the S/W based DVFS feature.
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/Makefile b/src/kernel/modules/netsys_driver/nat/hw_nat/Makefile
new file mode 100644
index 0000000..9137d94
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/Makefile
@@ -0,0 +1,23 @@
+ccflags-y += -Idrivers/net/ethernet/raeth
+ccflags-y += -Werror
+obj-$(CONFIG_RA_HW_NAT) += hw_nat.o
+
+hw_nat-objs := ra_nat.o foe_fdb.o util.o hwnat_ioctl.o mcast_tbl.o
+hw_nat-objs += hnat_dbg_proc.o hnat_common.o
+
+ifeq ($(CONFIG_HW_NAT_SW_DVFS),y)
+	hw_nat-objs += hnat_swdvfs.o
+endif
+
+ifeq ($(CONFIG_PINCTRL_MT7622),y)
+	EXTRA_CFLAGS += -DCONFIG_ARCH_MT7622
+endif
+
+ifeq ($(CONFIG_RALINK),y)
+	EXTRA_CFLAGS += -DCONFIG_RALINK_MT7621
+endif
+
+ifeq ($(CONFIG_SOC_MT7621),y)
+	EXTRA_CFLAGS += -DCONFIG_RALINK_MT7621
+endif
+
+EXTRA_CFLAGS += -DCONFIG_ARCH_COLGIN
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/Makefile.6 b/src/kernel/modules/netsys_driver/nat/hw_nat/Makefile.6
new file mode 100755
index 0000000..16cdc01
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/Makefile.6
@@ -0,0 +1,41 @@
+ccflags-y += -I$(NETSYS_DIR)/include/
+ccflags-y += -I$(NETSYS_HOOK_DIR)
+ccflags-y += -Werror
+obj-y += hw_nat.o
+
+hw_nat-objs := ra_nat.o foe_fdb.o util.o hwnat_ioctl.o mcast_tbl.o
+hw_nat-objs += hnat_dbg_proc.o hnat_common.o
+
+ifeq ($(CONFIG_PINCTRL_MT7622),y)
+	EXTRA_CFLAGS += -DCONFIG_ARCH_MT7622
+endif
+
+ifeq ($(CONFIG_RALINK),y)
+	EXTRA_CFLAGS += -DCONFIG_RALINK_MT7621
+endif
+
+ifeq ($(CONFIG_SOC_MT7621),y)
+	EXTRA_CFLAGS += -DCONFIG_RALINK_MT7621
+endif
+
+ifeq ($(CONFIG_RA_HW_NAT_TBL_32K),y)
+	EXTRA_CFLAGS += -DCONFIG_RA_HW_NAT_TBL_32K
+endif
+
+ifeq ($(CONFIG_RA_HW_NAT),y)
+	EXTRA_CFLAGS += -DCONFIG_RA_HW_NAT
+endif
+
+ifeq ($(CONFIG_RA_NAT_HW),y)
+	EXTRA_CFLAGS += -DCONFIG_RA_NAT_HW
+endif
+
+ifeq ($(CONFIG_RA_HW_NAT_HASH3),y)
+	EXTRA_CFLAGS += -DCONFIG_RA_HW_NAT_HASH3
+endif
+
+ifeq ($(CONFIG_RA_HW_NAT_BINDING_THRESHOLD),30)
+	EXTRA_CFLAGS += -DCONFIG_RA_HW_NAT_BINDING_THRESHOLD
+endif
+
+EXTRA_CFLAGS += -DCONFIG_ARCH_COLGIN
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/api.c b/src/kernel/modules/netsys_driver/nat/hw_nat/api.c
new file mode 100755
index 0000000..4baf58f
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/api.c
@@ -0,0 +1,466 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include "ra_nat.h"
+
+#include "foe_fdb.h"
+#include "frame_engine.h"
+#include "util.h"
+#include "hwnat_ioctl.h"
+#include "api.h"
+#include "hwnat_define.h"
+
+#if defined(CONFIG_RA_HW_NAT_IPV6)
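+/*
+ * Compute the FoE table index for an IPv6 5-tuple: XOR-fold the
+ * addresses and ports into three 32-bit words, mix them according to
+ * DFL_FOE_HASH_MODE, fold to 16 bits and mask by the table size.
+ * Each hash value selects a pair of adjacent slots (index = hash * 2),
+ * so a collision is retried once at index + 1.
+ */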
+int hash_ipv6(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	u32 t_hvt_31, t_hvt_63, t_hvt_95, t_hvt_sd;
+	u32 t_hvt_sd_23, t_hvt_sd_31_24, t_hash_32, t_hashs_16, t_ha16k, hash_index;
+	u32 ppe_saddr_127_96, ppe_saddr_95_64, ppe_saddr_63_32, ppe_saddr_31_0;
+	u32 ppe_daddr_127_96, ppe_daddr_95_64, ppe_daddr_63_32, ppe_daddr_31_0;
+	u32 ipv6_sip_127_96, ipv6_sip_95_64, ipv6_sip_63_32, ipv6_sip_31_0;
+	u32 ipv6_dip_127_96, ipv6_dip_95_64, ipv6_dip_63_32, ipv6_dip_31_0;
+	u32 sport, dport, ppe_sportv6, ppe_dportv6;
+
+	ipv6_sip_127_96 = key->ipv6_routing.sip0;
+	ipv6_sip_95_64 = key->ipv6_routing.sip1;
+	ipv6_sip_63_32 = key->ipv6_routing.sip2;
+	ipv6_sip_31_0 = key->ipv6_routing.sip3;
+	ipv6_dip_127_96 = key->ipv6_routing.dip0;
+	ipv6_dip_95_64 = key->ipv6_routing.dip1;
+	ipv6_dip_63_32 = key->ipv6_routing.dip2;
+	ipv6_dip_31_0 = key->ipv6_routing.dip3;
+	sport = key->ipv6_routing.sport;
+	dport = key->ipv6_routing.dport;
+
+	t_hvt_31 = ipv6_sip_31_0 ^ ipv6_dip_31_0 ^ (sport << 16 | dport);
+	t_hvt_63 = ipv6_sip_63_32 ^ ipv6_dip_63_32 ^ ipv6_dip_127_96;
+	t_hvt_95 = ipv6_sip_95_64 ^ ipv6_dip_95_64 ^ ipv6_sip_127_96;
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+	ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+	ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+	ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+	ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+	ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+	ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+	ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+	ppe_sportv6 = entry->ipv6_5t_route.sport;
+	ppe_dportv6 = entry->ipv6_5t_route.dport;
+	if (del != 1) {
+		if (entry->ipv6_5t_route.bfib1.state == BIND) {
+			pr_info("IPv6 hash collision, hash index +1\n");
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+		}
+		if (entry->ipv6_5t_route.bfib1.state == BIND) {
+			pr_info("IPv6 hash collision, cannot bind\n");
+			return -1;
+		}
+	} else if (del == 1) {
+		if ((ipv6_sip_127_96 != ppe_saddr_127_96) || (ipv6_sip_95_64 != ppe_saddr_95_64) ||
+		    (ipv6_sip_63_32 != ppe_saddr_63_32) || (ipv6_sip_31_0 != ppe_saddr_31_0) ||
+		    (ipv6_dip_127_96 != ppe_daddr_127_96) || (ipv6_dip_95_64 != ppe_daddr_95_64) ||
+		    (ipv6_dip_63_32 != ppe_daddr_63_32) || (ipv6_dip_31_0 != ppe_daddr_31_0) ||
+		    (sport != ppe_sportv6) || (dport != ppe_dportv6)) {
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+			ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+			ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+			ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+			ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+			ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+			ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+			ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+			ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+			ppe_sportv6 = entry->ipv6_5t_route.sport;
+			ppe_dportv6 = entry->ipv6_5t_route.dport;
+			if ((ipv6_sip_127_96 != ppe_saddr_127_96) || (ipv6_sip_95_64 != ppe_saddr_95_64) ||
+			    (ipv6_sip_63_32 != ppe_saddr_63_32) || (ipv6_sip_31_0 != ppe_saddr_31_0) ||
+			    (ipv6_dip_127_96 != ppe_daddr_127_96) || (ipv6_dip_95_64 != ppe_daddr_95_64) ||
+			    (ipv6_dip_63_32 != ppe_daddr_63_32) || (ipv6_dip_31_0 != ppe_daddr_31_0) ||
+			    (sport != ppe_sportv6) || (dport != ppe_dportv6)) {
+				if (fe_feature & SEMI_AUTO_MODE)
+					pr_info("IPv6 entry delete: entry not found\n");
+				else if (fe_feature & MANUAL_MODE)
+					pr_info("IPv6 hash collision, entry not found\n");
+				return -1;
+			}
+		}
+	}
+	return hash_index;
+}
+
+int hash_mib_ipv6(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	u32 t_hvt_31, t_hvt_63, t_hvt_95, t_hvt_sd;
+	u32 t_hvt_sd_23, t_hvt_sd_31_24, t_hash_32, t_hashs_16, t_ha16k, hash_index;
+	u32 ppe_saddr_127_96, ppe_saddr_95_64, ppe_saddr_63_32, ppe_saddr_31_0;
+	u32 ppe_daddr_127_96, ppe_daddr_95_64, ppe_daddr_63_32, ppe_daddr_31_0;
+	u32 ipv6_sip_127_96, ipv6_sip_95_64, ipv6_sip_63_32, ipv6_sip_31_0;
+	u32 ipv6_dip_127_96, ipv6_dip_95_64, ipv6_dip_63_32, ipv6_dip_31_0;
+	u32 sport, dport, ppe_sportv6, ppe_dportv6;
+
+	ipv6_sip_127_96 = key->ipv6_routing.sip0;
+	ipv6_sip_95_64 = key->ipv6_routing.sip1;
+	ipv6_sip_63_32 = key->ipv6_routing.sip2;
+	ipv6_sip_31_0 = key->ipv6_routing.sip3;
+	ipv6_dip_127_96 = key->ipv6_routing.dip0;
+	ipv6_dip_95_64 = key->ipv6_routing.dip1;
+	ipv6_dip_63_32 = key->ipv6_routing.dip2;
+	ipv6_dip_31_0 = key->ipv6_routing.dip3;
+	sport = key->ipv6_routing.sport;
+	dport = key->ipv6_routing.dport;
+
+	t_hvt_31 = ipv6_sip_31_0 ^ ipv6_dip_31_0 ^ (sport << 16 | dport);
+	t_hvt_63 = ipv6_sip_63_32 ^ ipv6_dip_63_32 ^ ipv6_dip_127_96;
+	t_hvt_95 = ipv6_sip_95_64 ^ ipv6_dip_95_64 ^ ipv6_sip_127_96;
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+	ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+	ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+	ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+	ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+	ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+	ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+	ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+	ppe_sportv6 = entry->ipv6_5t_route.sport;
+	ppe_dportv6 = entry->ipv6_5t_route.dport;
+
+	if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+	    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+	    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+	    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+	    (sport == ppe_sportv6) && (dport == ppe_dportv6)) {
+		if (debug_level >= 1)
+			pr_info("mib: ipv6 entry found, idx = %d\n", hash_index);
+	} else {
+		hash_index = hash_index + 1;
+		entry = &ppe_foe_base[hash_index];
+		ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+		ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+		ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+		ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+		ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+		ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+		ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+		ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+		ppe_sportv6 = entry->ipv6_5t_route.sport;
+		ppe_dportv6 = entry->ipv6_5t_route.dport;
+		if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+		    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+		    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+		    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+		    (sport == ppe_sportv6) && (dport == ppe_dportv6)) {
+			if (debug_level >= 1)
+				pr_info("mib: ipv6 entry found, idx = %d\n", hash_index);
+		} else {
+			if (debug_level >= 1)
+				pr_info("mib: ipv6 entry not found\n");
+			return -1;
+		}
+	}
+
+	return hash_index;
+}
+#endif
+
+int hash_ipv4(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	u32 t_hvt_31;
+	u32 t_hvt_63;
+	u32 t_hvt_95;
+	u32 t_hvt_sd;
+
+	u32 t_hvt_sd_23;
+	u32 t_hvt_sd_31_24;
+	u32 t_hash_32;
+	u32 t_hashs_16;
+	u32 t_ha16k;
+	u32 hash_index;
+	u32 ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport;
+
+	saddr = key->ipv4_hnapt.sip;
+	daddr = key->ipv4_hnapt.dip;
+	sport = key->ipv4_hnapt.sport;
+	dport = key->ipv4_hnapt.dport;
+
+	t_hvt_31 = sport << 16 | dport;
+	t_hvt_63 = daddr;
+	t_hvt_95 = saddr;
+
+	/* pr_info("saddr = %x, daddr=%x, sport=%d, dport=%d\n", saddr, daddr, sport, dport); */
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr = entry->ipv4_hnapt.sip;
+	ppe_daddr = entry->ipv4_hnapt.dip;
+	ppe_sport = entry->ipv4_hnapt.sport;
+	ppe_dport = entry->ipv4_hnapt.dport;
+
+	if (del != 1) {
+		if (entry->ipv4_hnapt.bfib1.state == BIND) {
+			pr_info("Hash collision, hash index +1\n");
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+		}
+		if (entry->ipv4_hnapt.bfib1.state == BIND) {
+			pr_info("Hash collision, cannot bind\n");
+			return -1;
+		}
+	} else if (del == 1) {
+		if ((saddr != ppe_saddr) || (daddr != ppe_daddr) ||
+		    (sport != ppe_sport) || (dport != ppe_dport)) {
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+			ppe_saddr = entry->ipv4_hnapt.sip;
+			ppe_daddr = entry->ipv4_hnapt.dip;
+			ppe_sport = entry->ipv4_hnapt.sport;
+			ppe_dport = entry->ipv4_hnapt.dport;
+			if ((saddr != ppe_saddr) || (daddr != ppe_daddr) ||
+			    (sport != ppe_sport) || (dport != ppe_dport)) {
+				if (fe_feature & SEMI_AUTO_MODE)
+					pr_info("hash collision, hwnat entry not found\n");
+				else if (fe_feature & MANUAL_MODE)
+					pr_info("Entry delete: entry not found\n");
+
+				return -1;
+			}
+		}
+	}
+	return hash_index;
+}
+
+int hash_mib_ipv4(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	u32 t_hvt_31;
+	u32 t_hvt_63;
+	u32 t_hvt_95;
+	u32 t_hvt_sd;
+
+	u32 t_hvt_sd_23;
+	u32 t_hvt_sd_31_24;
+	u32 t_hash_32;
+	u32 t_hashs_16;
+	u32 t_ha16k;
+	u32 hash_index;
+	u32 ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport;
+
+	saddr = key->ipv4_hnapt.sip;
+	daddr = key->ipv4_hnapt.dip;
+	sport = key->ipv4_hnapt.sport;
+	dport = key->ipv4_hnapt.dport;
+
+	t_hvt_31 = sport << 16 | dport;
+	t_hvt_63 = daddr;
+	t_hvt_95 = saddr;
+
+	/* pr_info("saddr = %x, daddr=%x, sport=%d, dport=%d\n", saddr, daddr, sport, dport); */
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr = entry->ipv4_hnapt.sip;
+	ppe_daddr = entry->ipv4_hnapt.dip;
+	ppe_sport = entry->ipv4_hnapt.sport;
+	ppe_dport = entry->ipv4_hnapt.dport;
+
+	if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+	    (sport == ppe_sport) && (dport == ppe_dport)) {
+		if (debug_level >= 1)
+			pr_info("mib: ipv4 entry found, idx = %d\n", hash_index);
+	} else {
+		hash_index = hash_index + 1;
+		entry = &ppe_foe_base[hash_index];
+		ppe_saddr = entry->ipv4_hnapt.sip;
+		ppe_daddr = entry->ipv4_hnapt.dip;
+		ppe_sport = entry->ipv4_hnapt.sport;
+		ppe_dport = entry->ipv4_hnapt.dport;
+		if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+		    (sport == ppe_sport) && (dport == ppe_dport)) {
+			if (debug_level >= 1)
+				pr_info("mib: ipv4 entry found, idx = %d\n", hash_index);
+		} else {
+			if (debug_level >= 1)
+				pr_info("mib: ipv4 entry not found\n");
+			return -1;
+		}
+	}
+
+	return hash_index;
+}
+
+int get_ppe_entry_idx(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	if ((key->pkt_type) == IPV4_NAPT)
+		return hash_ipv4(key, entry, del);
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+	else if ((key->pkt_type) == IPV6_ROUTING)
+		return hash_ipv6(key, entry, del);
+#endif
+	else
+		return -1;
+}
+
+int get_mib_entry_idx(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	if ((key->pkt_type) == IPV4_NAPT)
+		return hash_mib_ipv4(key, entry);
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+	else if ((key->pkt_type) == IPV6_ROUTING)
+		return hash_mib_ipv6(key, entry);
+#endif
+	else
+		return -1;
+}
+EXPORT_SYMBOL(get_mib_entry_idx);
+
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/api.h b/src/kernel/modules/netsys_driver/nat/hw_nat/api.h
new file mode 100644
index 0000000..547d971
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/api.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef _HWNAT_API
+#define _HWNAT_API
+
+extern struct foe_entry		*ppe_foe_base;
+
+int get_ppe_entry_idx(struct foe_pri_key *key, struct foe_entry *entry, int del);
+int get_mib_entry_idx(struct foe_pri_key *key, struct foe_entry *entry);
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb.c b/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb.c
new file mode 100755
index 0000000..094b90e
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb.c
@@ -0,0 +1,2405 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "ra_nat.h"
+#include "frame_engine.h"
+#include "foe_fdb.h"
+#include "hnat_ioctl.h"
+#include "util.h"
+#include "hnat_config.h"
+#include "hnat_define.h"
+#include "hnat_common.h"
+#include "hnat_dbg_proc.h"
+
+struct pkt_rx_parse_result ppe_parse_rx_result;
+
+extern struct foe_entry *ppe_foe_base;
+extern struct foe_entry *ppe1_foe_base;
+extern struct hwnat_interface hnat_if[64];
+extern int disabling_hwnat;
+
+#define PPE_MIB_TIMEOUT (4)	/* seconds */
+static struct timer_list ppe_mib_timer;
+static bool ppe_mib_timer_start = false;
+extern bool ppe_mib_counter_en;
+
+uint32_t get_rxif_idx(struct foe_entry *entry) {
+	if (IS_IPV4_HNAT(entry) || IS_IPV4_HNAPT(entry))
+		return entry->ipv4_hnapt.rxif_idx;
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_1T_ROUTE(entry))
+			return entry->ipv6_1t_route.rxif_idx;
+		else if (IS_IPV4_DSLITE(entry))
+			return entry->ipv4_dslite.rxif_idx;
+		else if (IS_IPV6_3T_ROUTE(entry))
+			return entry->ipv6_3t_route.rxif_idx;
+		else if (IS_IPV6_5T_ROUTE(entry))
+			return entry->ipv6_5t_route.rxif_idx;
+		else if (IS_IPV6_6RD(entry))
+			return entry->ipv6_6rd.rxif_idx;
+	}
+
+	return entry->ipv4_hnapt.rxif_idx;
+}
+
+void set_rxif_idx(struct foe_entry *entry, u16 value) {
+	if (IS_IPV4_HNAT(entry) || IS_IPV4_HNAPT(entry))
+		entry->ipv4_hnapt.rxif_idx = value;
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_1T_ROUTE(entry))
+			entry->ipv6_1t_route.rxif_idx = value;
+		else if (IS_IPV4_DSLITE(entry))
+			entry->ipv4_dslite.rxif_idx = value;
+		else if (IS_IPV6_3T_ROUTE(entry))
+			entry->ipv6_3t_route.rxif_idx = value;
+		else if (IS_IPV6_5T_ROUTE(entry))
+			entry->ipv6_5t_route.rxif_idx = value;
+		else if (IS_IPV6_6RD(entry))
+			entry->ipv6_6rd.rxif_idx = value;
+	}
+}
+
+
+uint32_t get_act_dp(struct foe_entry *entry) {
+	if (IS_IPV4_HNAT(entry) || IS_IPV4_HNAPT(entry))
+		return entry->ipv4_hnapt.act_dp;
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_1T_ROUTE(entry))
+			return entry->ipv6_1t_route.act_dp;
+		else if (IS_IPV4_DSLITE(entry))
+			return entry->ipv4_dslite.act_dp;
+		else if (IS_IPV6_3T_ROUTE(entry))
+			return entry->ipv6_3t_route.act_dp;
+		else if (IS_IPV6_5T_ROUTE(entry))
+			return entry->ipv6_5t_route.act_dp;
+		else if (IS_IPV6_6RD(entry))
+			return entry->ipv6_6rd.act_dp;
+	}
+
+	return entry->ipv4_hnapt.act_dp;
+}
+
+int is_foe_mcast_entry(struct foe_entry *entry) {
+
+#if defined(CONFIG_ODU_MCAST_SUPPORT)
+
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+		if (entry->ipv4_hnapt.dmac_hi[3] == 0x01)
+			return 1;
+	}
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_1T_ROUTE(entry)) {
+			if (entry->ipv6_1t_route.dmac_hi[3] == 0x33)
+				return 1;
+		} else if (IS_IPV4_DSLITE(entry)) {
+			if (entry->ipv4_dslite.dmac_hi[3] == 0x33)
+				return 1;
+		} else if (IS_IPV6_3T_ROUTE(entry)) {
+			if (entry->ipv6_3t_route.dmac_hi[3] == 0x33)
+				return 1;
+		} else if (IS_IPV6_5T_ROUTE(entry)) {
+			if (entry->ipv6_5t_route.dmac_hi[3] == 0x33)
+				return 1;
+		} else if (IS_IPV6_6RD(entry)) {
+			if (entry->ipv6_6rd.dmac_hi[3] == 0x33)
+				return 1;
+		}
+	}
+#endif /* CONFIG_ODU_MCAST_SUPPORT */
+	return 0;
+}
+
+
+
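+/*
+ * Read one entry's MIB counters from PPE0 (ppe_mib_dump_ppe1 is the
+ * PPE1 twin): write the entry index to MIB_SER_CR with bit 16 as the
+ * trigger/busy bit, poll until hardware clears it, then assemble the
+ * 48-bit byte counter and 40-bit packet counter from MIB_SER_R0..R2.
+ */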
+void ppe_mib_dump_ppe0(unsigned int entry_num, unsigned long *pkt_cnt, unsigned long *byte_cnt)
+{
+	unsigned int byt_l = 0;
+	unsigned long long byt_h = 0;
+	unsigned int pkt_l = 0;
+	unsigned long pkt_h = 0;
+
+	reg_write(MIB_SER_CR, entry_num | (1 << 16));
+	while (1) {
+		if (!((reg_read(MIB_SER_CR) & 0x10000) >> 16))
+			break;
+	}
+	/* make sure the DRAM write completed */
+	wmb();
+	byt_l = reg_read(MIB_SER_R0);	/* byte cnt bit31 ~ bit0 */
+	byt_h = reg_read(MIB_SER_R1) & 0xffff;	/* byte cnt bit47 ~ bit32 */
+	pkt_l = (reg_read(MIB_SER_R1) & 0xffff0000) >> 16;	/* packet cnt bit15 ~ bit0 */
+	pkt_h = reg_read(MIB_SER_R2) & 0xffffff;	/* packet cnt bit39 ~ bit16 */
+
+	*pkt_cnt = (pkt_h << 16) + pkt_l;
+	*byte_cnt = (byt_h << 32) + byt_l;
+}
+
+void ppe_mib_dump_ppe1(unsigned int entry_num, unsigned long *pkt_cnt, unsigned long *byte_cnt)
+{
+	unsigned int byt_l = 0;
+	unsigned long long byt_h = 0;
+	unsigned int pkt_l = 0;
+	unsigned long pkt_h = 0;
+
+	reg_write(MIB_SER_CR_PPE1, entry_num | (1 << 16));
+	while (1) {
+		if (!((reg_read(MIB_SER_CR_PPE1) & 0x10000) >> 16))
+			break;
+	}
+	/* make sure the DRAM write completed */
+	wmb();
+	byt_l = reg_read(MIB_SER_R0_PPE1);	/* byte cnt bit31 ~ bit0 */
+	byt_h = reg_read(MIB_SER_R1_PPE1) & 0xffff;	/* byte cnt bit47 ~ bit32 */
+	pkt_l = (reg_read(MIB_SER_R1_PPE1) & 0xffff0000) >> 16;	/* packet cnt bit15 ~ bit0 */
+	pkt_h = reg_read(MIB_SER_R2_PPE1) & 0xffffff;	/* packet cnt bit39 ~ bit16 */
+
+	*pkt_cnt = (pkt_h << 16) + pkt_l;
+	*byte_cnt = (byt_h << 32) + byt_l;
+}
+
+static int ppe_entry_mib_update(struct foe_entry *entry, int hash_index, int ppe_index) {
+
+	u8 sport, fport, mcast;
+	unsigned long pkt_cnt = 0, byte_cnt = 0;
+
+	if (entry->bfib1.state == BIND) {
+
+		fport = get_act_dp(entry);
+		sport = get_rxif_idx(entry);
+		mcast = is_foe_mcast_entry(entry);
+
+		if (ppe_index == 0)
+			ppe_mib_dump_ppe0(hash_index, &pkt_cnt, &byte_cnt);
+		else
+			ppe_mib_dump_ppe1(hash_index, &pkt_cnt, &byte_cnt);
+
+		if (fport < MAX_IF_NUM) {
+			hnat_if[fport].tx_byte_cnt += byte_cnt;
+			hnat_if[fport].tx_pkt_cnt += pkt_cnt;
+		}
+
+		if (sport < MAX_IF_NUM) {
+			hnat_if[sport].rx_byte_cnt += byte_cnt;
+			hnat_if[sport].rx_pkt_cnt += pkt_cnt;
+
+			if (mcast)
+				hnat_if[sport].rx_mcast_cnt += pkt_cnt;
+		}
+
+		if (debug_level == 10)
+			pr_notice("%s, sport(%d), fport(%d): hnat_if rx_byte:%llu, rx_pkt:%llu, tx_byte=%llu, tx_pkt=%llu, mcast=%llu\n",
+				__func__, sport, fport, hnat_if[sport].rx_byte_cnt, hnat_if[sport].rx_pkt_cnt,
+				hnat_if[fport].tx_byte_cnt, hnat_if[fport].tx_pkt_cnt, hnat_if[sport].rx_mcast_cnt);
+		return 1;
+	}
+	return 0;
+}
+
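+/*
+ * Walk both PPE tables and accumulate MIB counters for every bound
+ * entry into hnat_if[]; the returned count of bound entries lets the
+ * caller decide whether to re-arm the update timer.
+ */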
+static int ppe_mib_update(void) {
+
+	int hash_index;
+	struct foe_entry *entry, *entry1;
+	int bind_count = 0;
+
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+
+		entry = &ppe_foe_base[hash_index];
+		bind_count += ppe_entry_mib_update(entry, hash_index, 0);
+
+		entry1 = &ppe1_foe_base[hash_index];
+		bind_count += ppe_entry_mib_update(entry1, hash_index, 1);
+	}
+
+	return bind_count;
+}
+
+
+static void ppe_mib_update_cycle(struct timer_list *t) {
+
+	int bind_count = ppe_mib_update();
+
+	/* setup next timer */
+	if (bind_count > 0) {
+		mod_timer(&ppe_mib_timer, jiffies + HZ * PPE_MIB_TIMEOUT);
+		ppe_mib_timer_start = true;
+
+		if (debug_level == 10)
+			pr_info("%s, start timer\n", __func__);
+	} else {
+		ppe_mib_timer_start = false;
+		pr_info("%s, timer is stopped\n", __func__);
+	}
+}
+
+
+void ppe_init_mib_counter(void) {
+
+	/* setup mib timer */
+	timer_setup(&ppe_mib_timer, ppe_mib_update_cycle, 0);
+}
+
+
+void ppe_start_mib_timer(struct sk_buff *skb, struct foe_entry *entry) {
+
+	if (ppe_mib_counter_en && !ppe_mib_timer_start) {
+
+		/* start the timer to update ccmni mib */
+		mod_timer(&ppe_mib_timer, jiffies + HZ * PPE_MIB_TIMEOUT);
+		ppe_mib_timer_start = true;
+
+		if (debug_level == 10)
+			pr_info("%s, start timer\n", __func__);
+	}
+}
+
+
+void ppe_reset_dev_mib(struct net_device *dev) {
+	int i;
+
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			/* clear counters */
+			hnat_if[i].rx_byte_cnt = 0;
+			hnat_if[i].tx_byte_cnt = 0;
+
+			hnat_if[i].rx_pkt_cnt = 0;
+			hnat_if[i].tx_pkt_cnt = 0;
+			hnat_if[i].rx_mcast_cnt = 0;
+			break;
+		}
+	}
+}
+
+
+void ppe_get_mib_from_ppe(int dev_idx, struct rtnl_link_stats64 *stats) {
+
+	u8 sport, fport, sport1, fport1;
+	int hash_index;
+	int mcast, mcast1;
+	struct foe_entry *entry, *entry1;
+
+	/* mcast is rx pkt */
+	unsigned long total_tx_pkt = 0, total_tx_byte = 0;
+	unsigned long total_rx_pkt = 0, total_rx_byte = 0, mcast_rx_pkt = 0;
+	unsigned long pkt_cnt = 0, byte_cnt = 0, pkt_cnt1 = 0, byte_cnt1 = 0;
+
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+
+		entry = &ppe_foe_base[hash_index];
+		entry1 = &ppe1_foe_base[hash_index];
+
+		if (entry->bfib1.state == BIND) {
+
+			fport = get_act_dp(entry);
+			sport = get_rxif_idx(entry);
+			mcast = is_foe_mcast_entry(entry);
+
+			if (dev_idx == fport || dev_idx == sport) {
+				ppe_mib_dump_ppe0(hash_index, &pkt_cnt, &byte_cnt);
+				if (dev_idx == fport) {
+					total_tx_pkt += pkt_cnt;
+					total_tx_byte += byte_cnt;
+				}
+
+				if (dev_idx == sport) {
+					total_rx_pkt += pkt_cnt;
+					total_rx_byte += byte_cnt;
+					if (mcast)
+						mcast_rx_pkt += pkt_cnt;
+				}
+			}
+		}
+
+		if (entry1->bfib1.state == BIND) {
+
+			fport1 = get_act_dp(entry1);
+			sport1 = get_rxif_idx(entry1);
+			mcast1 = is_foe_mcast_entry(entry1);
+
+			if (dev_idx == fport1 || dev_idx == sport1) {
+				ppe_mib_dump_ppe1(hash_index, &pkt_cnt1, &byte_cnt1);
+				if (dev_idx == fport1) {
+					total_tx_pkt += pkt_cnt1;
+					total_tx_byte += byte_cnt1;
+				}
+
+				if (dev_idx == sport1) {
+					total_rx_pkt += pkt_cnt1;
+					total_rx_byte += byte_cnt1;
+					if (mcast1)
+						mcast_rx_pkt += pkt_cnt1;
+				}
+			}
+		}
+	}
+
+
+	stats->rx_packets = total_rx_pkt;
+	stats->tx_packets = total_tx_pkt;
+	stats->rx_bytes = total_rx_byte;
+	stats->tx_bytes = total_tx_byte;
+	stats->multicast = mcast_rx_pkt;
+
+}
+
+
+int ppe_get_dev_stats_handler(struct net_device *dev, struct rtnl_link_stats64 *stats) {
+
+	u8 i, dev_idx, match;
+
+	if (disabling_hwnat) {
+		if (debug_level == 1)
+			pr_notice("%s, disabling_hwnat:%d\n", __func__, disabling_hwnat);
+		return 0; /* fail */
+	}
+
+	dev_idx = 1;
+	match = 0;
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			dev_idx = i;
+			match = 1;
+			break;
+		}
+
+	}
+	if (match == 0) {
+		stats->rx_packets = 0;
+		stats->tx_packets = 0;
+		stats->rx_bytes = 0;
+		stats->tx_bytes = 0;
+		stats->multicast = 0;
+		return 1; /* succeed */
+	}
+
+	ppe_get_mib_from_ppe(dev_idx, stats);
+
+	if (debug_level == 10)
+		pr_notice("%s, if(%u):%s: hnat_if rx_pkt:%llu, tx_pkt:%llu, rx_byte=%llu, tx_byte=%llu, mcast:%llu\n",
+			__func__, dev_idx, dev->name, hnat_if[dev_idx].rx_pkt_cnt, hnat_if[dev_idx].tx_pkt_cnt,
+			hnat_if[dev_idx].rx_byte_cnt, hnat_if[dev_idx].tx_byte_cnt, hnat_if[dev_idx].rx_mcast_cnt);
+
+	stats->rx_packets += hnat_if[dev_idx].rx_pkt_cnt;
+	stats->tx_packets += hnat_if[dev_idx].tx_pkt_cnt;
+	stats->rx_bytes += hnat_if[dev_idx].rx_byte_cnt;
+	stats->tx_bytes += hnat_if[dev_idx].tx_byte_cnt;
+	stats->multicast += hnat_if[dev_idx].rx_mcast_cnt;
+
+	/* reset counters */
+	hnat_if[dev_idx].rx_pkt_cnt = 0;
+	hnat_if[dev_idx].tx_pkt_cnt = 0;
+	hnat_if[dev_idx].rx_byte_cnt = 0;
+	hnat_if[dev_idx].tx_byte_cnt = 0;
+	hnat_if[dev_idx].rx_mcast_cnt = 0;
+
+	if (debug_level == 10)
+		pr_notice("%s, if(%u):%s: rx_pkt:%llu, tx_pkt:%llu, rx_byte=%llu, tx_byte=%llu, mcast:%llu\n",
+			__func__, dev_idx, dev->name, stats->rx_packets, stats->tx_packets,
+			stats->rx_bytes, stats->tx_bytes, stats->multicast);
+
+	return 1; /* succeed */
+
+}
+
+
+#define DD \
+do { \
+	pr_notice("%s %d\n", __func__, __LINE__); \
+} while (0)
+
+/* 4          2         0 */
+/* +----------+---------+ */
+/* |      DMAC[47:16]   | */
+/* +--------------------+ */
+/* |DMAC[15:0]| 2nd VID | */
+/* +----------+---------+ */
+/* 4          2         0 */
+/* +----------+---------+ */
+/* |      SMAC[47:16]   | */
+/* +--------------------+ */
+/* |SMAC[15:0]| PPPOE ID| */
+/* +----------+---------+ */
+/* Ex: */
+/* Mac=01:22:33:44:55:66 */
+/* 4          2         0 */
+/* +----------+---------+ */
+/* |     01:22:33:44    | */
+/* +--------------------+ */
+/* |  55:66   | PPPOE ID| */
+/* +----------+---------+ */
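+/*
+ * FOE hash sketch (summarizing hash_ipv4/hash_ipv6 below): the flow tuple is
+ * folded into three 32-bit words t_hvt_31/63/95 -- e.g. in hash_ipv4,
+ * t_hvt_31 = (sport << 16) | dport, so sport=0x1234/dport=0x5678 gives
+ * 0x12345678 -- the words are mixed according to DFL_FOE_HASH_MODE, folded
+ * down to 16 bits, masked to the table size, and doubled, because each
+ * bucket is a two-slot set (hash_index and hash_index + 1).
+ */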
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+int hash_ipv6(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	u32 t_hvt_31, t_hvt_63, t_hvt_95, t_hvt_sd;
+	u32 t_hvt_sd_23, t_hvt_sd_31_24, t_hash_32, t_hashs_16, t_ha16k, hash_index;
+	u32 ppe_saddr_127_96, ppe_saddr_95_64, ppe_saddr_63_32, ppe_saddr_31_0;
+	u32 ppe_daddr_127_96, ppe_daddr_95_64, ppe_daddr_63_32, ppe_daddr_31_0;
+	u32 ipv6_sip_127_96, ipv6_sip_95_64, ipv6_sip_63_32, ipv6_sip_31_0;
+	u32 ipv6_dip_127_96, ipv6_dip_95_64, ipv6_dip_63_32, ipv6_dip_31_0;
+	u32 sport, dport, ppe_sportv6, ppe_dportv6;
+
+	ipv6_sip_127_96 = key->ipv6_routing.sip0;
+	ipv6_sip_95_64 = key->ipv6_routing.sip1;
+	ipv6_sip_63_32 = key->ipv6_routing.sip2;
+	ipv6_sip_31_0 = key->ipv6_routing.sip3;
+	ipv6_dip_127_96 = key->ipv6_routing.dip0;
+	ipv6_dip_95_64 = key->ipv6_routing.dip1;
+	ipv6_dip_63_32 = key->ipv6_routing.dip2;
+	ipv6_dip_31_0 = key->ipv6_routing.dip3;
+	sport = key->ipv6_routing.sport;
+	dport = key->ipv6_routing.dport;
+
+	t_hvt_31 = ipv6_sip_31_0 ^ ipv6_dip_31_0 ^ (sport << 16 | dport);
+	t_hvt_63 = ipv6_sip_63_32 ^ ipv6_dip_63_32 ^ ipv6_dip_127_96;
+	t_hvt_95 = ipv6_sip_95_64 ^ ipv6_dip_95_64 ^ ipv6_sip_127_96;
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+	ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+	ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+	ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+	ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+	ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+	ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+	ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+	ppe_sportv6 = entry->ipv6_5t_route.sport;
+	ppe_dportv6 = entry->ipv6_5t_route.dport;
+	if (del != 1) {
+		if (entry->ipv6_5t_route.bfib1.state == BIND) {
+			pr_notice("IPv6 hash collision, trying hash index +1\n");
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+		}
+		if (entry->ipv6_5t_route.bfib1.state == BIND) {
+			pr_notice("IPv6 hash collision: cannot bind\n");
+			return -1;
+		}
+	} else {
+		/* del == 1: locate the exact entry to delete in this two-slot set */
+		if (!((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+		      (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+		      (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+		      (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+		      (sport == ppe_sportv6) && (dport == ppe_dportv6))) {
+			/* first slot did not match: probe the second slot */
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+			ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+			ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+			ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+			ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+			ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+			ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+			ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+			ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+			ppe_sportv6 = entry->ipv6_5t_route.sport;
+			ppe_dportv6 = entry->ipv6_5t_route.dport;
+			if (!((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+			      (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+			      (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+			      (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+			      (sport == ppe_sportv6) && (dport == ppe_dportv6))) {
+				if (fe_feature & SEMI_AUTO_MODE)
+					pr_notice("IPv6 entry delete: entry not found\n");
+				else if (fe_feature & MANUAL_MODE)
+					pr_notice("IPv6 hash collision: entry not found\n");
+				return -1;
+			}
+		}
+	}
+	return hash_index;
+}
+
+int hash_mib_ipv6(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	u32 t_hvt_31, t_hvt_63, t_hvt_95, t_hvt_sd;
+	u32 t_hvt_sd_23, t_hvt_sd_31_24, t_hash_32, t_hashs_16, t_ha16k, hash_index;
+	u32 ppe_saddr_127_96, ppe_saddr_95_64, ppe_saddr_63_32, ppe_saddr_31_0;
+	u32 ppe_daddr_127_96, ppe_daddr_95_64, ppe_daddr_63_32, ppe_daddr_31_0;
+	u32 ipv6_sip_127_96, ipv6_sip_95_64, ipv6_sip_63_32, ipv6_sip_31_0;
+	u32 ipv6_dip_127_96, ipv6_dip_95_64, ipv6_dip_63_32, ipv6_dip_31_0;
+	u32 sport, dport, ppe_sportv6, ppe_dportv6;
+
+	ipv6_sip_127_96 = key->ipv6_routing.sip0;
+	ipv6_sip_95_64 = key->ipv6_routing.sip1;
+	ipv6_sip_63_32 = key->ipv6_routing.sip2;
+	ipv6_sip_31_0 = key->ipv6_routing.sip3;
+	ipv6_dip_127_96 = key->ipv6_routing.dip0;
+	ipv6_dip_95_64 = key->ipv6_routing.dip1;
+	ipv6_dip_63_32 = key->ipv6_routing.dip2;
+	ipv6_dip_31_0 = key->ipv6_routing.dip3;
+	sport = key->ipv6_routing.sport;
+	dport = key->ipv6_routing.dport;
+
+	t_hvt_31 = ipv6_sip_31_0 ^ ipv6_dip_31_0 ^ (sport << 16 | dport);
+	t_hvt_63 = ipv6_sip_63_32 ^ ipv6_dip_63_32 ^ ipv6_dip_127_96;
+	t_hvt_95 = ipv6_sip_95_64 ^ ipv6_dip_95_64 ^ ipv6_sip_127_96;
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+	ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+	ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+	ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+	ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+	ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+	ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+	ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+	ppe_sportv6 = entry->ipv6_5t_route.sport;
+	ppe_dportv6 = entry->ipv6_5t_route.dport;
+
+	if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+	    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+	    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+	    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+	    (sport == ppe_sportv6) && (dport == ppe_dportv6)) {
+		if (debug_level >= 1)
+			pr_notice("mib: ipv6 entry found, idx = %d\n", hash_index);
+	} else {
+		hash_index = hash_index + 1;
+		entry = &ppe_foe_base[hash_index];
+		ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+		ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+		ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+		ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+		ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+		ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+		ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+		ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+		ppe_sportv6 = entry->ipv6_5t_route.sport;
+		ppe_dportv6 = entry->ipv6_5t_route.dport;
+		if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+		    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+		    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+		    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+		    (sport == ppe_sportv6) && (dport == ppe_dportv6)) {
+			if (debug_level >= 1)
+				pr_notice("mib: ipv6 entry found, idx = %d\n", hash_index);
+		} else {
+			if (debug_level >= 1)
+				pr_notice("mib: ipv6 entry not found\n");
+			return -1;
+		}
+	}
+
+	return hash_index;
+}
+#endif
+
+int hash_ipv4(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	u32 t_hvt_31;
+	u32 t_hvt_63;
+	u32 t_hvt_95;
+	u32 t_hvt_sd;
+
+	u32 t_hvt_sd_23;
+	u32 t_hvt_sd_31_24;
+	u32 t_hash_32;
+	u32 t_hashs_16;
+	u32 t_ha16k;
+	u32 hash_index;
+	u32 ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport;
+
+	saddr = key->ipv4_hnapt.sip;
+	daddr = key->ipv4_hnapt.dip;
+	sport = key->ipv4_hnapt.sport;
+	dport = key->ipv4_hnapt.dport;
+
+	t_hvt_31 = sport << 16 | dport;
+	t_hvt_63 = daddr;
+	t_hvt_95 = saddr;
+
+	/* pr_info("saddr = %x, daddr=%x, sport=%d, dport=%d\n", saddr, daddr, sport, dport); */
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr = entry->ipv4_hnapt.sip;
+	ppe_daddr = entry->ipv4_hnapt.dip;
+	ppe_sport = entry->ipv4_hnapt.sport;
+	ppe_dport = entry->ipv4_hnapt.dport;
+
+	if (del != 1) {
+		if (entry->ipv4_hnapt.bfib1.state == BIND) {
+			pr_notice("Hash collision, trying hash index +1\n");
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+		}
+		if (entry->ipv4_hnapt.bfib1.state == BIND) {
+			pr_notice("Hash collision: cannot bind\n");
+			return -1;
+		}
+	} else {
+		/* del == 1: locate the exact entry to delete in this two-slot set */
+		if (!((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+		      (sport == ppe_sport) && (dport == ppe_dport))) {
+			/* first slot did not match: probe the second slot */
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+			ppe_saddr = entry->ipv4_hnapt.sip;
+			ppe_daddr = entry->ipv4_hnapt.dip;
+			ppe_sport = entry->ipv4_hnapt.sport;
+			ppe_dport = entry->ipv4_hnapt.dport;
+			if (!((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+			      (sport == ppe_sport) && (dport == ppe_dport))) {
+				if (fe_feature & SEMI_AUTO_MODE)
+					pr_notice("hash collision: entry not found\n");
+				else if (fe_feature & MANUAL_MODE)
+					pr_notice("entry delete: entry not found\n");
+				return -1;
+			}
+		}
+	}
+	return hash_index;
+}
+
+int hash_mib_ipv4(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	u32 t_hvt_31;
+	u32 t_hvt_63;
+	u32 t_hvt_95;
+	u32 t_hvt_sd;
+
+	u32 t_hvt_sd_23;
+	u32 t_hvt_sd_31_24;
+	u32 t_hash_32;
+	u32 t_hashs_16;
+	u32 t_ha16k;
+	u32 hash_index;
+	u32 ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport;
+
+	saddr = key->ipv4_hnapt.sip;
+	daddr = key->ipv4_hnapt.dip;
+	sport = key->ipv4_hnapt.sport;
+	dport = key->ipv4_hnapt.dport;
+
+	t_hvt_31 = sport << 16 | dport;
+	t_hvt_63 = daddr;
+	t_hvt_95 = saddr;
+
+	/* pr_info("saddr = %x, daddr=%x, sport=%d, dport=%d\n", saddr, daddr, sport, dport); */
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr = entry->ipv4_hnapt.sip;
+	ppe_daddr = entry->ipv4_hnapt.dip;
+	ppe_sport = entry->ipv4_hnapt.sport;
+	ppe_dport = entry->ipv4_hnapt.dport;
+
+	if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+	    (sport == ppe_sport) && (dport == ppe_dport)) {
+		if (debug_level >= 1)
+			pr_notice("mib: ipv4 entry found, idx = %d\n", hash_index);
+	} else {
+		hash_index = hash_index + 1;
+		entry = &ppe_foe_base[hash_index];
+		ppe_saddr = entry->ipv4_hnapt.sip;
+		ppe_daddr = entry->ipv4_hnapt.dip;
+		ppe_sport = entry->ipv4_hnapt.sport;
+		ppe_dport = entry->ipv4_hnapt.dport;
+		if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+		    (sport == ppe_sport) && (dport == ppe_dport)) {
+			if (debug_level >= 1)
+				pr_notice("mib: ipv4 entry found, idx = %d\n", hash_index);
+		} else {
+			if (debug_level >= 1)
+				pr_notice("mib: ipv4 entry not found\n");
+			return -1;
+		}
+	}
+
+	return hash_index;
+}
+
+int get_ppe_entry_idx(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	if ((key->pkt_type) == IPV4_NAPT)
+		return hash_ipv4(key, entry, del);
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+	else if ((key->pkt_type) == IPV6_ROUTING)
+		return hash_ipv6(key, entry, del);
+#endif
+	else
+		return -1;
+}
+
+int get_mib_entry_idx(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	if ((key->pkt_type) == IPV4_NAPT)
+		return hash_mib_ipv4(key, entry);
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+	else if ((key->pkt_type) == IPV6_ROUTING)
+		return hash_mib_ipv6(key, entry);
+#endif
+	else
+		return -1;
+}
+EXPORT_SYMBOL(get_mib_entry_idx);
+
+void foe_set_mac_hi_info(u8 *dst, uint8_t *src)
+{
+	dst[3] = src[0];
+	dst[2] = src[1];
+	dst[1] = src[2];
+	dst[0] = src[3];
+}
+
+void foe_set_mac_lo_info(u8 *dst, uint8_t *src)
+{
+	dst[1] = src[4];
+	dst[0] = src[5];
+}
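+/*
+ * Byte-order example for the two helpers above, matching the layout diagram
+ * earlier in this file: for MAC 01:22:33:44:55:66, foe_set_mac_hi_info()
+ * stores hi[3..0] = 01:22:33:44 and foe_set_mac_lo_info() stores
+ * lo[1..0] = 55:66, i.e. the bytes of each word are reversed in memory.
+ */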
+
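+/*
+ * Poll bit 8 of CAH_CTRL (the software cache-access request trigger) every
+ * ~1 ms, for up to 1000 iterations (roughly one second). Returns 1 once the
+ * hardware clears the request, 0 on timeout.
+ */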
+static int is_request_done(void)
+{
+	int count = 1000;
+
+	/* wait up to 1 s for the previous cache request to finish */
+	do {
+		if (((reg_read(CAH_CTRL) >> 8) & 0x1) == 0)
+			return 1;
+		usleep_range(1000, 1100);
+	} while (--count);
+
+	return 0;
+}
+
+#define MAX_CACHE_LINE_NUM		32
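+/*
+ * Dump the 32-line FOE cache through the indirect access window: disable the
+ * cache, read each line's tag/state and its first 16 words via
+ * CAH_LINE_RW/CAH_CTRL (zeroing them on the way out), then clear the cache
+ * table and re-enable it. Dumping therefore invalidates the cached entries.
+ */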
+int foe_dump_cache_entry(void)
+{
+	int line = 0;
+	int state = 0;
+	int tag = 0;
+	int cah_en = 0;
+	int i = 0;
+
+	pr_notice("foe_dump_cache_entry!!!!\n");
+	cah_en = reg_read(CAH_CTRL) & 0x1;
+
+	if (!cah_en) {
+		pr_notice("Cache is not enabled\n");
+		return 0;
+	}
+
+	/* cache disable */
+	reg_modify_bits(CAH_CTRL, 0, 0, 1);
+
+	pr_notice(" No--|---State---|----Tag-----\n");
+	pr_notice("-----+-----------+------------\n");
+	for (line = 0; line < MAX_CACHE_LINE_NUM; line++) {
+		/* set line number */
+		reg_modify_bits(CAH_LINE_RW, line, 0, 15);
+
+		/* OFFSET_RW = 0x1F (Get Entry Number) */
+		reg_modify_bits(CAH_LINE_RW, 0x1F, 16, 8);
+
+		/* software access cache command = read */
+		reg_modify_bits(CAH_CTRL, 2, 12, 2);
+
+		/* trigger software access cache request */
+		reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+		if (is_request_done()) {
+			tag = (reg_read(CAH_RDATA) & 0xFFFF);
+			state = ((reg_read(CAH_RDATA) >> 16) & 0x3);
+			pr_notice("%04d | %s   | %05d\n", line,
+				 (state == 3) ? " Lock  " :
+				 (state == 2) ? " Dirty " :
+				 (state == 1) ? " Valid " : "Invalid", tag);
+		} else {
+			pr_notice("%s is timeout (%d)\n", __func__, line);
+		}
+
+		/* software access cache command = write */
+		reg_modify_bits(CAH_CTRL, 3, 12, 2);
+
+		reg_write(CAH_WDATA, 0);
+
+		/* trigger software access cache request */
+		reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+		if (!is_request_done())
+			pr_notice("%s is timeout (%d)\n", __func__, line);
+		/* dump the first 16 words of each foe entry */
+		pr_notice("==========<Flow Table Entry=%d >===============\n", tag);
+		for (i = 0; i < 16; i++) {
+			reg_modify_bits(CAH_LINE_RW, i, 16, 8);
+
+			/* software access cache command = read */
+			reg_modify_bits(CAH_CTRL, 2, 12, 2);
+
+			/* trigger software access cache request */
+			reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+			if (is_request_done())
+				pr_notice("%02d  %08X\n", i, reg_read(CAH_RDATA));
+			else
+				pr_notice("%s is timeout (%d)\n", __func__, line);
+
+			/* software access cache command = write */
+			reg_modify_bits(CAH_CTRL, 3, 12, 2);
+
+			reg_write(CAH_WDATA, 0);
+
+			/* trigger software access cache request */
+			reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+			if (!is_request_done())
+				pr_notice("%s is timeout (%d)\n", __func__, line);
+		}
+		pr_notice("=========================================\n");
+	}
+
+	/* clear cache table before enabling cache */
+	reg_modify_bits(CAH_CTRL, 1, 9, 1);
+	reg_modify_bits(CAH_CTRL, 0, 9, 1);
+
+	/* cache enable */
+	reg_modify_bits(CAH_CTRL, 1, 0, 1);
+
+	return 1;
+}
+
+int ppe_force_port(struct foe_entry *entry)
+{
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+#if defined(CONFIG_HNAT_V1)
+		return (entry->ipv4_hnapt.info_blk2 >> 5 & 0x7) + (((entry->ipv4_hnapt.info_blk2 >> 14) & 0x1) << 3);
+#elif defined (CONFIG_HNAT_V2)
+		return (entry->ipv4_hnapt.info_blk2 >> 9 & 0xf);
+#else
+		return (entry->ipv4_hnapt.info_blk2 >> 5 & 0x7);
+#endif
+	} else {
+#if defined(CONFIG_HNAT_V1)
+		return (entry->ipv6_5t_route.info_blk2 >> 5 & 0x7) + (((entry->ipv6_5t_route.info_blk2 >> 14) & 0x1) << 3);
+#elif defined (CONFIG_HNAT_V2)
+		return (entry->ipv6_5t_route.info_blk2 >> 9 & 0xf);
+#else
+		return (entry->ipv6_5t_route.info_blk2 >> 5 & 0x7);
+#endif
+	}
+}
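+/*
+ * Force-port decoding example for the HNAT_V1 case above: the 4-bit port is
+ * split across info_blk2 bits [7:5] plus bit 14 as the MSB, so with bit 14
+ * set and bits [7:5] = 0b011 the function returns 3 + (1 << 3) = 11.
+ */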
+
+int ppe_qid(struct foe_entry *entry)
+{
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+#if defined(CONFIG_HNAT_V1)
+		return (entry->ipv4_hnapt.iblk2.qid +
+				  ((entry->ipv4_hnapt.iblk2.qid1 & 0x03) << 4));
+#elif defined (CONFIG_HNAT_V2)
+		return (entry->ipv4_hnapt.iblk2.qid);
+#else
+		return (entry->ipv4_hnapt.iblk2.qid);
+#endif
+	} else {
+#if defined(CONFIG_HNAT_V1)
+		return (entry->ipv6_5t_route.iblk2.qid + ((entry->ipv6_5t_route.iblk2.qid1 & 0x03) << 4));
+#elif defined (CONFIG_HNAT_V2)
+		return (entry->ipv6_5t_route.iblk2.qid);
+#else
+		return (entry->ipv6_5t_route.iblk2.qid);
+#endif
+	}
+}
+
+int ppe_fqos(struct foe_entry *entry)
+{
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry))
+		return entry->ipv4_hnapt.info_blk2 >> 8 & 0x1;
+	else
+		return entry->ipv6_5t_route.info_blk2 >> 8 & 0x1;
+}
+
+int info_blk2(struct foe_entry *entry)
+{
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry))
+		return entry->ipv4_hnapt.info_blk2;
+	else
+		return entry->ipv6_5t_route.info_blk2;
+}
+void foe_dump_entry(uint32_t index, struct foe_entry *entry)
+{
+	u32 i = 0;
+	u32 print_cnt;
+	u32 *p = (uint32_t *)entry;
+
+	NAT_PRINT("==========<Flow Table Entry=%d (%p)>===============\n", index, entry);
+	if (debug_level >= 2) {
+		print_cnt = 24;
+		for (i = 0; i < print_cnt; i++)
+			NAT_PRINT("%02d: %08X\n", i, *(p + i));
+	}
+	NAT_PRINT("-----------------<Flow Info>------------------\n");
+	NAT_PRINT("Information Block 1: %08X\n", entry->ipv4_hnapt.info_blk1);
+	NAT_PRINT("Information Block 2=%x (FP=%d FQOS=%d QID=%d)\n",
+		  info_blk2(entry),
+		  ppe_force_port(entry),
+		  ppe_fqos(entry), ppe_qid(entry));
+
+	if (IS_IPV4_HNAPT(entry)) {
+		NAT_PRINT("Create IPv4 HNAPT entry\n");
+		NAT_PRINT
+		    ("IPv4 Org IP/Port: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_hnapt.sip), IP_FORMAT2(entry->ipv4_hnapt.sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.sip), IP_FORMAT0(entry->ipv4_hnapt.sip),
+		     entry->ipv4_hnapt.sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.dip), IP_FORMAT2(entry->ipv4_hnapt.dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip),
+		     entry->ipv4_hnapt.dport);
+		NAT_PRINT
+		    ("IPv4 New IP/Port: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_hnapt.new_sip), IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_sip), IP_FORMAT0(entry->ipv4_hnapt.new_sip),
+		     entry->ipv4_hnapt.new_sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.new_dip), IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_dip), IP_FORMAT0(entry->ipv4_hnapt.new_dip),
+		     entry->ipv4_hnapt.new_dport);
+	} else if (IS_IPV4_HNAT(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv4_hnapt.info_blk2);
+		NAT_PRINT("Create IPv4 HNAT entry\n");
+		NAT_PRINT("IPv4 Org IP: %u.%u.%u.%u->%u.%u.%u.%u\n",
+			  IP_FORMAT3(entry->ipv4_hnapt.sip), IP_FORMAT2(entry->ipv4_hnapt.sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.sip), IP_FORMAT0(entry->ipv4_hnapt.sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.dip), IP_FORMAT2(entry->ipv4_hnapt.dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip));
+		NAT_PRINT("IPv4 New IP: %u.%u.%u.%u->%u.%u.%u.%u\n",
+			  IP_FORMAT3(entry->ipv4_hnapt.new_sip), IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_sip), IP_FORMAT0(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.new_dip), IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_dip), IP_FORMAT0(entry->ipv4_hnapt.new_dip));
+	}
+	if (IS_IPV6_1T_ROUTE(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_1t_route.info_blk2);
+		NAT_PRINT("Create IPv6 Route entry\n");
+		NAT_PRINT("Destination IPv6: %08X:%08X:%08X:%08X",
+			  entry->ipv6_1t_route.ipv6_dip3, entry->ipv6_1t_route.ipv6_dip2,
+			  entry->ipv6_1t_route.ipv6_dip1, entry->ipv6_1t_route.ipv6_dip0);
+	} else if (IS_IPV4_DSLITE(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv4_dslite.info_blk2);
+		NAT_PRINT("Create IPv4 Ds-Lite entry\n");
+		NAT_PRINT
+		    ("IPv4 Ds-Lite: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_dslite.sip), IP_FORMAT2(entry->ipv4_dslite.sip),
+		     IP_FORMAT1(entry->ipv4_dslite.sip), IP_FORMAT0(entry->ipv4_dslite.sip),
+		     entry->ipv4_dslite.sport,
+		     IP_FORMAT3(entry->ipv4_dslite.dip), IP_FORMAT2(entry->ipv4_dslite.dip),
+		     IP_FORMAT1(entry->ipv4_dslite.dip), IP_FORMAT0(entry->ipv4_dslite.dip),
+		     entry->ipv4_dslite.dport);
+		NAT_PRINT("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			  entry->ipv4_dslite.tunnel_sipv6_0, entry->ipv4_dslite.tunnel_sipv6_1,
+			  entry->ipv4_dslite.tunnel_sipv6_2, entry->ipv4_dslite.tunnel_sipv6_3,
+			  entry->ipv4_dslite.tunnel_dipv6_0, entry->ipv4_dslite.tunnel_dipv6_1,
+			  entry->ipv4_dslite.tunnel_dipv6_2, entry->ipv4_dslite.tunnel_dipv6_3);
+	} else if (IS_IPV4_MAPE(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv4_dslite.info_blk2);
+		NAT_PRINT("Create IPv4 MAP-E entry\n");
+		NAT_PRINT
+		    ("IPv4 MAP-E: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_dslite.sip), IP_FORMAT2(entry->ipv4_dslite.sip),
+		     IP_FORMAT1(entry->ipv4_dslite.sip), IP_FORMAT0(entry->ipv4_dslite.sip),
+		     entry->ipv4_dslite.sport,
+		     IP_FORMAT3(entry->ipv4_dslite.dip), IP_FORMAT2(entry->ipv4_dslite.dip),
+		     IP_FORMAT1(entry->ipv4_dslite.dip), IP_FORMAT0(entry->ipv4_dslite.dip),
+		     entry->ipv4_dslite.dport);
+		NAT_PRINT("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			  entry->ipv4_dslite.tunnel_sipv6_0, entry->ipv4_dslite.tunnel_sipv6_1,
+			  entry->ipv4_dslite.tunnel_sipv6_2, entry->ipv4_dslite.tunnel_sipv6_3,
+			  entry->ipv4_dslite.tunnel_dipv6_0, entry->ipv4_dslite.tunnel_dipv6_1,
+			  entry->ipv4_dslite.tunnel_dipv6_2, entry->ipv4_dslite.tunnel_dipv6_3);
+		NAT_PRINT
+		    ("IPv4 Org IP/Port: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_dslite.sip), IP_FORMAT2(entry->ipv4_dslite.sip),
+		     IP_FORMAT1(entry->ipv4_dslite.sip), IP_FORMAT0(entry->ipv4_dslite.sip),
+		     entry->ipv4_dslite.sport,
+		     IP_FORMAT3(entry->ipv4_dslite.dip), IP_FORMAT2(entry->ipv4_dslite.dip),
+		     IP_FORMAT1(entry->ipv4_dslite.dip), IP_FORMAT0(entry->ipv4_dslite.dip),
+		     entry->ipv4_dslite.dport);
+		NAT_PRINT
+		    ("IPv4 New IP/Port: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_dslite.new_sip), IP_FORMAT2(entry->ipv4_dslite.new_sip),
+		     IP_FORMAT1(entry->ipv4_dslite.new_sip), IP_FORMAT0(entry->ipv4_dslite.new_sip),
+		     entry->ipv4_dslite.new_sport,
+		     IP_FORMAT3(entry->ipv4_dslite.new_dip), IP_FORMAT2(entry->ipv4_dslite.new_dip),
+		     IP_FORMAT1(entry->ipv4_dslite.new_dip), IP_FORMAT0(entry->ipv4_dslite.new_dip),
+		     entry->ipv4_dslite.new_dport);
+	} else if (IS_IPV4_MAPT(entry)) {
+#if 0
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv4_dslite.info_blk2);
+		NAT_PRINT("Create IPv4 MAP-T entry (4 to 6)\n");
+		NAT_PRINT
+		    ("IPv4 MAPE: %u.%u.%u.%u.%d->%u.%u.%u.%u:%d\n ",
+		     IP_FORMAT3(entry->ipv4_dslite.sip), IP_FORMAT2(entry->ipv4_dslite.sip),
+		     IP_FORMAT1(entry->ipv4_dslite.sip), IP_FORMAT0(entry->ipv4_dslite.sip),
+		     entry->ipv4_dslite.sport,
+		     IP_FORMAT3(entry->ipv4_dslite.dip), IP_FORMAT2(entry->ipv4_dslite.dip),
+		     IP_FORMAT1(entry->ipv4_dslite.dip), IP_FORMAT0(entry->ipv4_dslite.dip),
+		     entry->ipv4_dslite.dport);
+		NAT_PRINT("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			  entry->ipv4_dslite.tunnel_sipv6_0, entry->ipv4_dslite.tunnel_sipv6_1,
+			  entry->ipv4_dslite.tunnel_sipv6_2, entry->ipv4_dslite.tunnel_sipv6_3,
+			  entry->ipv4_dslite.tunnel_dipv6_0, entry->ipv4_dslite.tunnel_dipv6_1,
+			  entry->ipv4_dslite.tunnel_dipv6_2, entry->ipv4_dslite.tunnel_dipv6_3);
+		NAT_PRINT
+		    ("IPv4 Org IP/Port: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_dslite.sip), IP_FORMAT2(entry->ipv4_dslite.sip),
+		     IP_FORMAT1(entry->ipv4_dslite.sip), IP_FORMAT0(entry->ipv4_dslite.sip),
+		     entry->ipv4_dslite.sport,
+		     IP_FORMAT3(entry->ipv4_dslite.dip), IP_FORMAT2(entry->ipv4_dslite.dip),
+		     IP_FORMAT1(entry->ipv4_dslite.dip), IP_FORMAT0(entry->ipv4_dslite.dip),
+		     entry->ipv4_dslite.dport);
+		NAT_PRINT
+		    ("L4 New Port: %d->%d\n", entry->ipv4_dslite.new_sport, entry->ipv4_dslite.new_dport);
+#else
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_6rd.info_blk2);
+		NAT_PRINT("Create MAP-T (6 to 4)\n");
+		if (IS_IPV6_FLAB_EBL()) {
+			NAT_PRINT
+			    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Flow Label=%08X)\n",
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+			     entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3,
+			     ((entry->ipv6_5t_route.sport << 16) | (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+			NAT_PRINT ("L4 New Port: %d->%d\n", entry->ipv6_6rd.new_sport, entry->ipv6_6rd.new_dport);
+		} else {
+			NAT_PRINT
+			    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.sport, entry->ipv6_6rd.ipv6_dip0,
+			     entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+			     entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+			NAT_PRINT ("L4 New Port: %d->%d\n", entry->ipv6_6rd.new_sport, entry->ipv6_6rd.new_dport);
+		}
+#endif
+	} else if (IS_IPV6_3T_ROUTE(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_3t_route.info_blk2);
+		NAT_PRINT("Create IPv6 3-Tuple entry\n");
+		NAT_PRINT
+		    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n",
+		     entry->ipv6_3t_route.ipv6_sip0, entry->ipv6_3t_route.ipv6_sip1,
+		     entry->ipv6_3t_route.ipv6_sip2, entry->ipv6_3t_route.ipv6_sip3,
+		     entry->ipv6_3t_route.ipv6_dip0, entry->ipv6_3t_route.ipv6_dip1,
+		     entry->ipv6_3t_route.ipv6_dip2, entry->ipv6_3t_route.ipv6_dip3,
+		     entry->ipv6_3t_route.prot);
+	} else if (IS_IPV6_5T_ROUTE(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_5t_route.info_blk2);
+		NAT_PRINT("Create IPv6 5-Tuple entry\n");
+		if (IS_IPV6_FLAB_EBL()) {
+			NAT_PRINT
+			    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Flow Label=%08X)\n",
+			     entry->ipv6_5t_route.ipv6_sip0, entry->ipv6_5t_route.ipv6_sip1,
+			     entry->ipv6_5t_route.ipv6_sip2, entry->ipv6_5t_route.ipv6_sip3,
+			     entry->ipv6_5t_route.ipv6_dip0, entry->ipv6_5t_route.ipv6_dip1,
+			     entry->ipv6_5t_route.ipv6_dip2, entry->ipv6_5t_route.ipv6_dip3,
+			     ((entry->ipv6_5t_route.sport << 16) | (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+		} else {
+			NAT_PRINT
+			    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+			     entry->ipv6_5t_route.ipv6_sip0, entry->ipv6_5t_route.ipv6_sip1,
+			     entry->ipv6_5t_route.ipv6_sip2, entry->ipv6_5t_route.ipv6_sip3,
+			     entry->ipv6_5t_route.sport, entry->ipv6_5t_route.ipv6_dip0,
+			     entry->ipv6_5t_route.ipv6_dip1, entry->ipv6_5t_route.ipv6_dip2,
+			     entry->ipv6_5t_route.ipv6_dip3, entry->ipv6_5t_route.dport);
+		}
+	} else if (IS_IPV6_6RD(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_6rd.info_blk2);
+		NAT_PRINT("Create IPv6 6RD entry\n");
+		if (IS_IPV6_FLAB_EBL()) {
+			NAT_PRINT
+			    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Flow Label=%08X)\n",
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+			     entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3,
+			     ((entry->ipv6_5t_route.sport << 16) | (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+		} else {
+			NAT_PRINT
+			    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.sport, entry->ipv6_6rd.ipv6_dip0,
+			     entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+			     entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+		}
+	}
+
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+		NAT_PRINT("DMAC=%02X:%02X:%02X:%02X:%02X:%02X SMAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
+			  entry->ipv4_hnapt.dmac_hi[3], entry->ipv4_hnapt.dmac_hi[2],
+			  entry->ipv4_hnapt.dmac_hi[1], entry->ipv4_hnapt.dmac_hi[0],
+			  entry->ipv4_hnapt.dmac_lo[1], entry->ipv4_hnapt.dmac_lo[0],
+			  entry->ipv4_hnapt.smac_hi[3], entry->ipv4_hnapt.smac_hi[2],
+			  entry->ipv4_hnapt.smac_hi[1], entry->ipv4_hnapt.smac_hi[0],
+			  entry->ipv4_hnapt.smac_lo[1], entry->ipv4_hnapt.smac_lo[0]);
+		NAT_PRINT("State = %s, ",
+			  entry->bfib1.state ==
+			  0 ? "Invalid" : entry->bfib1.state ==
+			  1 ? "Unbind" : entry->bfib1.state ==
+			  2 ? "BIND" : entry->bfib1.state ==
+			  3 ? "FIN" : "Unknown");
+		NAT_PRINT("Vlan_Layer = %u, ",
+			  entry->bfib1.vlan_layer);
+		NAT_PRINT("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+			  entry->ipv4_hnapt.etype, entry->ipv4_hnapt.vlan1,
+			  entry->ipv4_hnapt.vlan2_winfo);
+		NAT_PRINT("mib = %d, multicast = %d, pppoe = %d, proto = %s, act_dp = %d rx_idx = %d, dma_ring_no = %d\n",
+			  entry->ipv4_hnapt.iblk2.mibf,
+			  entry->ipv4_hnapt.iblk2.mcast,
+			  entry->ipv4_hnapt.bfib1.psn,
+			  entry->ipv4_hnapt.bfib1.udp == 0 ? "TCP" :
+			  entry->ipv4_hnapt.bfib1.udp == 1 ? "UDP" : "Unknown",
+			  entry->ipv4_hnapt.act_dp,
+			  entry->ipv4_hnapt.rxif_idx,
+			  entry->ipv4_hnapt.iblk2.rx_id
+			  );
+		NAT_PRINT("=========================================\n\n");
+	} else {
+		NAT_PRINT("DMAC=%02X:%02X:%02X:%02X:%02X:%02X SMAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
+			  entry->ipv6_5t_route.dmac_hi[3], entry->ipv6_5t_route.dmac_hi[2],
+			  entry->ipv6_5t_route.dmac_hi[1], entry->ipv6_5t_route.dmac_hi[0],
+			  entry->ipv6_5t_route.dmac_lo[1], entry->ipv6_5t_route.dmac_lo[0],
+			  entry->ipv6_5t_route.smac_hi[3], entry->ipv6_5t_route.smac_hi[2],
+			  entry->ipv6_5t_route.smac_hi[1], entry->ipv6_5t_route.smac_hi[0],
+			  entry->ipv6_5t_route.smac_lo[1], entry->ipv6_5t_route.smac_lo[0]);
+		NAT_PRINT("State = %s, STC = %s, ", entry->bfib1.state ==
+			  0 ? "Invalid" : entry->bfib1.state ==
+			  1 ? "Unbind" : entry->bfib1.state ==
+			  2 ? "BIND" : entry->bfib1.state ==
+			  3 ? "FIN" : "Unknown", entry->bfib1.sta ==
+			  0 ? "Dynamic" : entry->bfib1.sta ==
+			  1 ? "static" : "Unknown");
+
+		NAT_PRINT("Vlan_Layer = %u, ",
+			  entry->bfib1.vlan_layer);
+		NAT_PRINT("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+			  entry->ipv6_5t_route.etype,
+			  entry->ipv6_5t_route.vlan1,
+			  entry->ipv6_5t_route.vlan2_winfo);
+		NAT_PRINT("mib = %d, multicast = %d, pppoe = %d, proto = %s, act_dp = %d",
+			  entry->ipv6_5t_route.iblk2.mibf,
+			  entry->ipv6_5t_route.iblk2.mcast,
+			  entry->ipv6_5t_route.bfib1.psn,
+			  entry->ipv6_5t_route.bfib1.udp ==
+			  0 ? "TCP" : entry->ipv6_5t_route.bfib1.udp ==
+			  1 ? "UDP" : "Unknown",
+			  entry->ipv6_5t_route.act_dp);
+		NAT_PRINT(" Remove tunnel = %u\n", entry->bfib1.rmt);
+		NAT_PRINT("=========================================\n\n");
+	}
+}
+
+int foe_get_ppe_entries(struct hwnat_args *opt1, int count, struct foe_entry *foe_base)
+{
+	struct foe_entry *entry;
+	int hash_index = 0;
+
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		entry = &foe_base[hash_index];
+		if (entry->bfib1.state == opt1->entry_state) {
+			opt1->entries[count].hash_index = hash_index;
+			opt1->entries[count].pkt_type = entry->ipv4_hnapt.bfib1.pkt_type;
+
+			/* Extra info */
+			opt1->entries[count].fport = opt1->entries[count].fqos = opt1->entries[count].qid = 0;
+			if (IS_IPV4_GRP(entry)) {
+				opt1->entries[count].fport = entry->ipv4_hnapt.iblk2.acnt;
+				opt1->entries[count].fqos =  ppe_force_port(entry);
+				opt1->entries[count].qid = ppe_qid(entry);
+			}
+			if (fe_feature & HNAT_IPV6) {
+				if (IS_IPV6_GRP(entry)) {
+					opt1->entries[count].fport = entry->ipv6_5t_route.iblk2.acnt;
+					opt1->entries[count].fqos = ppe_force_port(entry);
+					opt1->entries[count].qid =  ppe_qid(entry);
+				}
+			}
+			opt1->entries[count].rxif_idx = get_rxif_idx(entry);
+
+			if (IS_IPV4_HNAT(entry)) {
+				opt1->entries[count].ing_sipv4 = entry->ipv4_hnapt.sip;
+				opt1->entries[count].ing_dipv4 = entry->ipv4_hnapt.dip;
+				opt1->entries[count].eg_sipv4 = entry->ipv4_hnapt.new_sip;
+				opt1->entries[count].eg_dipv4 = entry->ipv4_hnapt.new_dip;
+				count++;
+			} else if (IS_IPV4_HNAPT(entry)) {
+				opt1->entries[count].ing_sipv4 = entry->ipv4_hnapt.sip;
+				opt1->entries[count].ing_dipv4 = entry->ipv4_hnapt.dip;
+				opt1->entries[count].eg_sipv4 = entry->ipv4_hnapt.new_sip;
+				opt1->entries[count].eg_dipv4 = entry->ipv4_hnapt.new_dip;
+				opt1->entries[count].ing_sp = entry->ipv4_hnapt.sport;
+				opt1->entries[count].ing_dp = entry->ipv4_hnapt.dport;
+				opt1->entries[count].eg_sp = entry->ipv4_hnapt.new_sport;
+				opt1->entries[count].eg_dp = entry->ipv4_hnapt.new_dport;
+				count++;
+			}
+			if (fe_feature & HNAT_IPV6) {
+				if (IS_IPV6_1T_ROUTE(entry)) {
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_1t_route.ipv6_dip3;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_1t_route.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_1t_route.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_1t_route.ipv6_dip0;
+					count++;
+				} else if (IS_IPV4_DSLITE(entry)) {
+					opt1->entries[count].ing_sipv4 = entry->ipv4_dslite.sip;
+					opt1->entries[count].ing_dipv4 = entry->ipv4_dslite.dip;
+					opt1->entries[count].ing_sp = entry->ipv4_dslite.sport;
+					opt1->entries[count].ing_dp = entry->ipv4_dslite.dport;
+					opt1->entries[count].eg_sipv6_0 = entry->ipv4_dslite.tunnel_sipv6_0;
+					opt1->entries[count].eg_sipv6_1 = entry->ipv4_dslite.tunnel_sipv6_1;
+					opt1->entries[count].eg_sipv6_2 = entry->ipv4_dslite.tunnel_sipv6_2;
+					opt1->entries[count].eg_sipv6_3 = entry->ipv4_dslite.tunnel_sipv6_3;
+					opt1->entries[count].eg_dipv6_0 = entry->ipv4_dslite.tunnel_dipv6_0;
+					opt1->entries[count].eg_dipv6_1 = entry->ipv4_dslite.tunnel_dipv6_1;
+					opt1->entries[count].eg_dipv6_2 = entry->ipv4_dslite.tunnel_dipv6_2;
+					opt1->entries[count].eg_dipv6_3 = entry->ipv4_dslite.tunnel_dipv6_3;
+					count++;
+				} else if (IS_IPV6_3T_ROUTE(entry)) {
+					opt1->entries[count].ing_sipv6_0 = entry->ipv6_3t_route.ipv6_sip0;
+					opt1->entries[count].ing_sipv6_1 = entry->ipv6_3t_route.ipv6_sip1;
+					opt1->entries[count].ing_sipv6_2 = entry->ipv6_3t_route.ipv6_sip2;
+					opt1->entries[count].ing_sipv6_3 = entry->ipv6_3t_route.ipv6_sip3;
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_3t_route.ipv6_dip0;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_3t_route.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_3t_route.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_3t_route.ipv6_dip3;
+					opt1->entries[count].prot = entry->ipv6_3t_route.prot;
+					count++;
+				} else if (IS_IPV6_5T_ROUTE(entry)) {
+					opt1->entries[count].ing_sipv6_0 = entry->ipv6_5t_route.ipv6_sip0;
+					opt1->entries[count].ing_sipv6_1 = entry->ipv6_5t_route.ipv6_sip1;
+					opt1->entries[count].ing_sipv6_2 = entry->ipv6_5t_route.ipv6_sip2;
+					opt1->entries[count].ing_sipv6_3 = entry->ipv6_5t_route.ipv6_sip3;
+					opt1->entries[count].ing_sp = entry->ipv6_5t_route.sport;
+					opt1->entries[count].ing_dp = entry->ipv6_5t_route.dport;
+
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_5t_route.ipv6_dip0;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_5t_route.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_5t_route.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_5t_route.ipv6_dip3;
+					opt1->entries[count].ipv6_flowlabel = IS_IPV6_FLAB_EBL();
+					count++;
+				} else if (IS_IPV6_6RD(entry)) {
+					opt1->entries[count].ing_sipv6_0 = entry->ipv6_6rd.ipv6_sip0;
+					opt1->entries[count].ing_sipv6_1 = entry->ipv6_6rd.ipv6_sip1;
+					opt1->entries[count].ing_sipv6_2 = entry->ipv6_6rd.ipv6_sip2;
+					opt1->entries[count].ing_sipv6_3 = entry->ipv6_6rd.ipv6_sip3;
+
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_6rd.ipv6_dip0;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_6rd.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_6rd.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_6rd.ipv6_dip3;
+					opt1->entries[count].ing_sp = entry->ipv6_6rd.sport;
+					opt1->entries[count].ing_dp = entry->ipv6_6rd.dport;
+					opt1->entries[count].ipv6_flowlabel = IS_IPV6_FLAB_EBL();
+
+					opt1->entries[count].eg_sipv4 = entry->ipv6_6rd.tunnel_sipv4;
+					opt1->entries[count].eg_dipv4 = entry->ipv6_6rd.tunnel_dipv4;
+					count++;
+				}
+			}
+
+		}
+
+		if (count >= 16 * 1024)
+			break;
+	}
+
+	return count;
+}
+
+
+int foe_get_all_entries(struct hwnat_args *opt1)
+{
+	int count;		/* valid entry count */
+
+	count = foe_get_ppe_entries(opt1, 0, ppe_foe_base);
+	if (count < 16 * 1024)
+		count = foe_get_ppe_entries(opt1, count, ppe1_foe_base);
+
+	opt1->num_of_entries = count;
+
+	if (opt1->num_of_entries > 0)
+		return HWNAT_SUCCESS;
+	else
+		return HWNAT_ENTRY_NOT_FOUND;
+}
+
+int foe_bind_entry(struct hwnat_args *opt1)
+{
+	struct foe_entry *entry;
+
+	if ((u32)opt1->entry_num >= FOE_4TB_SIZ)
+		return HWNAT_FAIL;
+
+	entry = &ppe_foe_base[(u32)opt1->entry_num];
+
+	/* restore right information block1 */
+	entry->bfib1.time_stamp = reg_read(FOE_TS) & 0xFFFF;
+	entry->bfib1.state = BIND;
+
+	return HWNAT_SUCCESS;
+}
+
+int foe_un_bind_entry(struct hwnat_args *opt)
+{
+	struct foe_entry *entry;
+
+	if ((u32)opt->entry_num >= FOE_4TB_SIZ)
+		return HWNAT_FAIL;
+
+	entry = &ppe_foe_base[(u32)opt->entry_num];
+
+	entry->ipv4_hnapt.udib1.state = INVALID;
+	entry->ipv4_hnapt.udib1.time_stamp = reg_read(FOE_TS) & 0xFF;
+
+	ppe_set_cache_ebl();	/* clear HWNAT cache */
+
+	return HWNAT_SUCCESS;
+}
+
+int _foe_drop_entry(unsigned int entry_num)
+{
+	struct foe_entry *entry;
+
+	entry = &ppe_foe_base[entry_num];
+
+	entry->ipv4_hnapt.iblk2.dp = 7;
+
+	ppe_set_cache_ebl();	/* clear HWNAT cache */
+
+	return HWNAT_SUCCESS;
+}
+EXPORT_SYMBOL(_foe_drop_entry);
+
+int foe_drop_entry(struct hwnat_args *opt)
+{
+	if ((u32)opt->entry_num >= FOE_4TB_SIZ)
+		return HWNAT_FAIL;
+
+	return _foe_drop_entry(opt->entry_num);
+}
+
+int foe_del_entry_by_num(uint32_t entry_num)
+{
+	struct foe_entry *entry;
+
+	if (entry_num >= FOE_4TB_SIZ)
+		return HWNAT_FAIL;
+
+	entry = &ppe_foe_base[entry_num];
+	memset(entry, 0, sizeof(struct foe_entry));
+	ppe_set_cache_ebl();	/* clear HWNAT cache */
+
+	return HWNAT_SUCCESS;
+}
+
+void foe_tbl_clean(void)
+{
+	u32 foe_tbl_size;
+
+	foe_tbl_size = FOE_4TB_SIZ * sizeof(struct foe_entry);
+	memset(ppe_foe_base, 0, foe_tbl_size);
+	memset(ppe1_foe_base, 0, foe_tbl_size);
+	ppe_set_cache_ebl();	/* clear HWNAT cache */
+}
+EXPORT_SYMBOL(foe_tbl_clean);
+
+void hw_nat_l2_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		foe_set_mac_hi_info(entry->ipv4_hnapt.dmac_hi, opt->dmac);
+		foe_set_mac_lo_info(entry->ipv4_hnapt.dmac_lo, opt->dmac);
+		foe_set_mac_hi_info(entry->ipv4_hnapt.smac_hi, opt->smac);
+		foe_set_mac_lo_info(entry->ipv4_hnapt.smac_lo, opt->smac);
+		entry->ipv4_hnapt.vlan1 = opt->vlan1;
+		/* warp/mt7622 wifi hwnat does not support vlan2 */
+		//entry->ipv4_hnapt.vlan2_winfo = opt->vlan2;
+		entry->ipv4_hnapt.etype = opt->etype;
+		entry->ipv4_hnapt.pppoe_id = opt->pppoe_id;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			foe_set_mac_hi_info(entry->ipv6_5t_route.dmac_hi, opt->dmac);
+			foe_set_mac_lo_info(entry->ipv6_5t_route.dmac_lo, opt->dmac);
+			foe_set_mac_hi_info(entry->ipv6_5t_route.smac_hi, opt->smac);
+			foe_set_mac_lo_info(entry->ipv6_5t_route.smac_lo, opt->smac);
+			entry->ipv6_5t_route.vlan1 = opt->vlan1;
+			/* mt7622 wifi hwnat does not support vlan2 */
+			//entry->ipv6_5t_route.vlan2_winfo = opt->vlan2;
+			entry->ipv6_5t_route.etype = opt->etype;
+			entry->ipv6_5t_route.pppoe_id = opt->pppoe_id;
+		}
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		foe_set_mac_hi_info(entry->ipv4_dslite.dmac_hi, opt->dmac);
+		foe_set_mac_lo_info(entry->ipv4_dslite.dmac_lo, opt->dmac);
+		foe_set_mac_hi_info(entry->ipv4_dslite.smac_hi, opt->smac);
+		foe_set_mac_lo_info(entry->ipv4_dslite.smac_lo, opt->smac);
+		entry->ipv4_dslite.vlan1 = opt->vlan1;
+		/* mt7622 wifi hwnat does not support vlan2 */
+		//entry->ipv4_dslite.vlan2_winfo = opt->vlan2;
+		entry->ipv4_dslite.pppoe_id = opt->pppoe_id;
+		entry->ipv4_dslite.etype = opt->etype;
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		foe_set_mac_hi_info(entry->ipv6_6rd.dmac_hi, opt->dmac);
+		foe_set_mac_lo_info(entry->ipv6_6rd.dmac_lo, opt->dmac);
+		foe_set_mac_hi_info(entry->ipv6_6rd.smac_hi, opt->smac);
+		foe_set_mac_lo_info(entry->ipv6_6rd.smac_lo, opt->smac);
+		entry->ipv6_6rd.vlan1 = opt->vlan1;
+		/* mt7622 wifi hwnat does not support vlan2 */
+		entry->ipv6_6rd.vlan2_winfo = opt->vlan2;
+		entry->ipv6_6rd.pppoe_id = opt->pppoe_id;
+		entry->ipv6_6rd.etype = opt->etype;
+	}
+}
+
+struct test_hdr {
+	u8 flow_lbl[3];
+};
+
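+/*
+ * test_hdr mirrors the 3-byte IPv6 flow-label field. hw_nat_l3_info() below
+ * stamps the fixed label 0x56 0x12 0xab into DS-Lite entries, apparently as
+ * a test/DVT pattern rather than a value taken from live traffic.
+ */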
+void hw_nat_l3_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	struct test_hdr test;
+
+	test.flow_lbl[0] = 0x56;
+	test.flow_lbl[1] = 0x12;
+	test.flow_lbl[2] = 0xab;
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.sip = opt->ing_sipv4;
+		entry->ipv4_hnapt.dip = opt->ing_dipv4;
+		entry->ipv4_hnapt.new_sip = opt->eg_sipv4;
+		entry->ipv4_hnapt.new_dip = opt->eg_dipv4;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.ipv6_sip0 = opt->ing_sipv6_0;
+			entry->ipv6_5t_route.ipv6_sip1 = opt->ing_sipv6_1;
+			entry->ipv6_5t_route.ipv6_sip2 = opt->ing_sipv6_2;
+			entry->ipv6_5t_route.ipv6_sip3 = opt->ing_sipv6_3;
+
+			entry->ipv6_5t_route.ipv6_dip0 = opt->ing_dipv6_0;
+			entry->ipv6_5t_route.ipv6_dip1 = opt->ing_dipv6_1;
+			entry->ipv6_5t_route.ipv6_dip2 = opt->ing_dipv6_2;
+			entry->ipv6_5t_route.ipv6_dip3 = opt->ing_dipv6_3;
+		}
+
+/*		pr_info("opt->ing_sipv6_0 = %x\n", opt->ing_sipv6_0);*/
+/*		pr_info("opt->ing_sipv6_1 = %x\n", opt->ing_sipv6_1);*/
+/*		pr_info("opt->ing_sipv6_2 = %x\n", opt->ing_sipv6_2);*/
+/*		pr_info("opt->ing_sipv6_3 = %x\n", opt->ing_sipv6_3);*/
+/*		pr_info("opt->ing_dipv6_0 = %x\n", opt->ing_dipv6_0);*/
+/*		pr_info("opt->ing_dipv6_1 = %x\n", opt->ing_dipv6_1);*/
+/*		pr_info("opt->ing_dipv6_2 = %x\n", opt->ing_dipv6_2);*/
+/*		pr_info("opt->ing_dipv6_3 = %x\n", opt->ing_dipv6_3);*/
+
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip0 = %x\n", entry->ipv6_5t_route.ipv6_sip0);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip1 = %x\n", entry->ipv6_5t_route.ipv6_sip1);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip2 = %x\n", entry->ipv6_5t_route.ipv6_sip2);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip3 = %x\n", entry->ipv6_5t_route.ipv6_sip3);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip0 = %x\n", entry->ipv6_5t_route.ipv6_dip0);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip1 = %x\n", entry->ipv6_5t_route.ipv6_dip1);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip2 = %x\n", entry->ipv6_5t_route.ipv6_dip2);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip3 = %x\n", entry->ipv6_5t_route.ipv6_dip3);*/
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		entry->ipv4_dslite.iblk2.mibf = 1;
+		if (opt->rmt == 0) {
+			entry->ipv4_dslite.tunnel_sipv6_0 = opt->ing_sipv6_0;
+			entry->ipv4_dslite.tunnel_sipv6_1 = opt->ing_sipv6_1;
+			entry->ipv4_dslite.tunnel_sipv6_2 = opt->ing_sipv6_2;
+			entry->ipv4_dslite.tunnel_sipv6_3 = opt->ing_sipv6_3;
+			entry->ipv4_dslite.tunnel_dipv6_0 = opt->ing_dipv6_0;
+			entry->ipv4_dslite.tunnel_dipv6_1 = opt->ing_dipv6_1;
+			entry->ipv4_dslite.tunnel_dipv6_2 = opt->ing_dipv6_2;
+			entry->ipv4_dslite.tunnel_dipv6_3 = opt->ing_dipv6_3;
+			entry->ipv4_dslite.iblk2.mibf = 1;
+			entry->ipv4_dslite.priority = 0xf;
+			entry->ipv4_dslite.hop_limit = 120;
+			memcpy(entry->ipv4_dslite.flow_lbl, test.flow_lbl, 3);
+			/* IPv4 DS-Lite and IPv6 6RD shall be turned on by SW during initialization */
+			entry->bfib1.pkt_type = IPV4_DSLITE;
+		}
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		/* fill in ipv4 6rd entry */
+		entry->ipv6_6rd.tunnel_sipv4 = opt->ing_sipv4;
+		entry->ipv6_6rd.tunnel_dipv4 = opt->ing_dipv4;
+		//entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(&ppe_parse_result->iph);
+		entry->ipv6_6rd.hdr_chksum = opt->checksum;
+		pr_notice("opt->checksum = %x\n", opt->checksum);
+		entry->ipv6_6rd.flag = opt->frag;
+		entry->ipv6_6rd.ttl = opt->ttl;
+		entry->ipv6_6rd.dscp = 0;
+		entry->ipv6_6rd.iblk2.mibf = 1;
+		reg_modify_bits(PPE_6RD_ID, 0, 0, 16);
+		reg_modify_bits(PPE1_6RD_ID, 0, 0, 16);
+		pr_notice("PPE_6RD_ID = %x\n", reg_read(PPE_6RD_ID));
+		entry->ipv6_6rd.per_flow_6rd_id = 1;
+		/* IPv4 DS-Lite and IPv6 6RD shall be turned on by SW during initialization */
+		entry->bfib1.pkt_type = IPV6_6RD;
+	}
+}
+
+void hw_nat_l4_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.dport = opt->ing_dp;
+		entry->ipv4_hnapt.sport = opt->ing_sp;
+		entry->ipv4_hnapt.new_dport = opt->eg_dp;
+		entry->ipv4_hnapt.new_sport = opt->eg_sp;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.dport = opt->ing_dp;
+			entry->ipv6_5t_route.sport = opt->ing_sp;
+		}
+	}
+}
+
+void hw_nat_ib1_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+
+	entry->ipv4_hnapt.bfib1.cah = 1;
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.bfib1.pkt_type = IPV4_NAPT;
+		entry->ipv4_hnapt.bfib1.sta = 1;
+		entry->ipv4_hnapt.bfib1.udp = opt->is_udp; /* tcp/udp */
+		entry->ipv4_hnapt.bfib1.state = BIND;
+		entry->ipv4_hnapt.bfib1.ka = 1; /* keepalive */
+		entry->ipv4_hnapt.bfib1.ttl = 1; /* TTL-1 */
+		entry->ipv4_hnapt.bfib1.psn = opt->pppoe_act; /* insert / remove */
+		entry->ipv4_hnapt.bfib1.vlan_layer = opt->vlan_layer;
+		entry->ipv4_hnapt.bfib1.time_stamp = reg_read(FOE_TS) & 0x3FFF;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.bfib1.pkt_type = IPV6_ROUTING;
+			entry->ipv6_5t_route.bfib1.sta = 1;
+			entry->ipv6_5t_route.bfib1.udp = opt->is_udp; /* tcp/udp */
+			entry->ipv6_5t_route.bfib1.state = BIND;
+			entry->ipv6_5t_route.bfib1.ka = 0; /* keepalive */
+			entry->ipv6_5t_route.bfib1.ttl = 1; /* TTL-1 */
+			entry->ipv6_5t_route.bfib1.psn = opt->pppoe_act; /* insert / remove */
+			entry->ipv6_5t_route.bfib1.vlan_layer = opt->vlan_layer;
+			entry->ipv6_5t_route.bfib1.time_stamp = reg_read(FOE_TS) & 0x3FFF;
+		}
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		entry->ipv4_dslite.bfib1.rmt = opt->rmt;
+		if (opt->rmt == 0)
+			entry->ipv4_dslite.bfib1.pkt_type = IPV4_DSLITE;
+
+		entry->ipv4_dslite.bfib1.sta = 1;
+		entry->ipv4_dslite.bfib1.udp = opt->is_udp; /* tcp/udp */
+		entry->ipv4_dslite.bfib1.state = BIND;
+		entry->ipv4_dslite.bfib1.ka = 0; /* keepalive */
+		entry->ipv4_dslite.bfib1.ttl = 1; /* TTL-1 */
+		entry->ipv4_dslite.bfib1.vlan_layer = opt->vlan_layer;
+		entry->ipv4_dslite.bfib1.time_stamp = reg_read(FOE_TS) & 0x3FFF;
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		entry->ipv6_6rd.bfib1.rmt = opt->rmt;
+		if (opt->rmt == 0)
+			entry->ipv6_6rd.bfib1.pkt_type = IPV6_6RD;
+
+		entry->ipv6_6rd.bfib1.sta = 1;
+		entry->ipv6_6rd.bfib1.udp = opt->is_udp; /* tcp/udp */
+		entry->ipv6_6rd.bfib1.state = BIND;
+		entry->ipv6_6rd.bfib1.ka = 0; /* keepalive */
+		entry->ipv6_6rd.bfib1.ttl = 1; /* TTL-1 */
+		entry->ipv6_6rd.bfib1.vlan_layer = opt->vlan_layer;
+		entry->ipv6_6rd.bfib1.time_stamp = reg_read(FOE_TS) & 0x3FFF;
+	}
+}
+
+void hw_nat_ib2_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.iblk2.dp = (opt->dst_port) & 0x7; /* 0:cpu, 1:GE1 */
+		entry->ipv4_hnapt.iblk2.dscp = opt->dscp;
+
+		if ((opt->dst_port) >= 8) {
+#if defined(CONFIG_HNAT_V1)
+			entry->ipv4_hnapt.iblk2.dp1 = 1;
+			entry->ipv4_hnapt.iblk2.qid1 = 1;
+#endif
+			entry->ipv4_hnapt.iblk2.qid = 0;
+			entry->ipv4_hnapt.iblk2.fqos = 0;
+		} else {
+#if defined(CONFIG_HNAT_V1)
+			entry->ipv4_hnapt.iblk2.dp1 = 0;
+#endif
+			entry->ipv4_hnapt.iblk2.qid = 1;
+			entry->ipv4_hnapt.iblk2.fqos = 1;
+		}
+
+		entry->ipv4_hnapt.iblk2.acnt = opt->dst_port;
+		entry->ipv4_hnapt.iblk2.mcast = 0;
+		entry->ipv4_hnapt.iblk2.mibf = 1;
+
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.iblk2.dp = opt->dst_port; /* 0:cpu, 1:GE1 */
+			entry->ipv6_5t_route.iblk2.dscp = opt->dscp;
+			entry->ipv6_5t_route.iblk2.acnt = opt->dst_port;
+			entry->ipv6_5t_route.iblk2.mibf = 1;
+		}
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		entry->ipv4_dslite.iblk2.dp = opt->dst_port; /* 0:cpu, 1:GE1 */
+		entry->ipv4_dslite.iblk2.dscp = opt->dscp;
+		entry->ipv4_dslite.iblk2.acnt = opt->dst_port;
+		entry->ipv4_dslite.iblk2.mibf = 1;
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		entry->ipv6_6rd.iblk2.dp = opt->dst_port; /* 0:cpu, 1:GE1 */
+		entry->ipv6_6rd.iblk2.dscp = opt->dscp;
+		entry->ipv6_6rd.iblk2.acnt = opt->dst_port;
+		entry->ipv6_6rd.iblk2.mibf = 1;
+	}
+}
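+/*
+ * Note on the IPV4_NAPT branch above: dst_port values >= 8 do not fit the
+ * 3-bit iblk2.dp field, so on HNAT_V1 the dp1/qid1 extension bits are set
+ * and per-flow QoS (fqos) is disabled. The exact meaning of ports 8 and
+ * above (e.g. extension/WDMA destinations) is an assumption and depends on
+ * the SoC's port map.
+ */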
+
+void hw_nat_semi_bind(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	u32 current_time;
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		/* Set Current time to time_stamp field in information block 1 */
+		current_time = reg_read(FOE_TS) & 0xFFFF;
+		entry->bfib1.time_stamp = (uint16_t)current_time;
+		/* IPv4: TTL / IPv6: Hop Limit field */
+		entry->ipv4_hnapt.bfib1.ttl = DFL_FOE_TTL_REGEN;
+		/* enable cache by default */
+		entry->ipv4_hnapt.bfib1.cah = 1;
+		/* Change Foe Entry State to Binding State */
+		entry->bfib1.state = BIND;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			/* Set Current time to time_stamp field in information block 1 */
+			current_time = reg_read(FOE_TS) & 0xFFFF;
+			entry->bfib1.time_stamp = (uint16_t)current_time;
+			/* IPv4: TTL / IPv6: Hop Limit field */
+			entry->ipv4_hnapt.bfib1.ttl = DFL_FOE_TTL_REGEN;
+			/* enable cache by default */
+			entry->ipv4_hnapt.bfib1.cah = 1;
+			/* Change Foe Entry State to Binding State */
+			entry->bfib1.state = BIND;
+		}
+	}
+}
+
+int set_done_bit_zero(struct foe_entry *foe_entry)
+{
+	if (IS_IPV4_HNAT(foe_entry) || IS_IPV4_HNAPT(foe_entry))
+		foe_entry->ipv4_hnapt.resv1 = 0;
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV4_DSLITE(foe_entry)) {
+			//foe_entry->ipv4_dslite.resv1 = 0;
+		} else if (IS_IPV6_3T_ROUTE(foe_entry)) {
+			foe_entry->ipv6_3t_route.resv1 = 0;
+		} else if (IS_IPV6_5T_ROUTE(foe_entry)) {
+			foe_entry->ipv6_5t_route.resv1 = 0;
+		} else if (IS_IPV6_6RD(foe_entry)) {
+			foe_entry->ipv6_6rd.resv1 = 0;
+		} else {
+			pr_notice("%s: unexpected packet format\n", __func__);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int get_entry_done_bit(struct foe_entry *foe_entry)
+{
+	int done_bit;
+
+	done_bit = 0;
+	if (IS_IPV4_HNAT(foe_entry) || IS_IPV4_HNAPT(foe_entry))
+		done_bit = foe_entry->ipv4_hnapt.resv1;
+#if 0
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV4_DSLITE(foe_entry)) {
+			done_bit = foe_entry->ipv4_dslite.resv1;
+		} else if (IS_IPV6_3T_ROUTE(foe_entry)) {
+			done_bit = foe_entry->ipv6_3t_route.resv1;
+		} else if (IS_IPV6_5T_ROUTE(foe_entry)) {
+			done_bit = foe_entry->ipv6_5t_route.resv1;
+		} else if (IS_IPV6_6RD(foe_entry)) {
+			done_bit = foe_entry->ipv6_6rd.resv1;
+		} else {
+			pr_notice("%s:get packet format something wrong\n", __func__);
+			return -1;
+		}
+	}
+#endif
+	return done_bit;
+}
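+/*
+ * Semi-auto mode handshake, as inferred from these helpers and their use in
+ * foe_add_entry(): the PPE pre-fills an unbound entry and flags it via the
+ * resv1 "done" bit; software checks the bit with get_entry_done_bit(),
+ * completes the entry, binds it with hw_nat_semi_bind(), and clears the bit
+ * again through set_done_bit_zero().
+ */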
+
+int foe_add_entry_dvt(struct hwnat_tuple *opt)
+{
+	struct foe_entry *entry = NULL;
+	struct foe_entry *entry1 = NULL;
+
+	s32 hash_index;
+
+	if ((u32)opt->hash_index >= FOE_4TB_SIZ)
+		return HWNAT_FAIL;
+
+	hash_index = opt->hash_index;
+	pr_notice("opt->hash_index = %d\n", opt->hash_index);
+	pr_notice("FP = %d\n", opt->dst_port);
+	if (hash_index != 0) {
+		entry = &ppe_foe_base[hash_index];
+		hw_nat_l2_info(entry, opt);
+		hw_nat_l3_info(entry, opt);
+		if ((opt->pkt_type != IPV4_DSLITE) && (opt->pkt_type != IPV6_6RD))
+			hw_nat_l4_info(entry, opt);
+
+		hw_nat_ib1_info(entry, opt);
+		hw_nat_ib2_info(entry, opt);
+		foe_dump_entry(hash_index, entry);
+
+		entry1 = &ppe1_foe_base[hash_index];
+		hw_nat_l2_info(entry1, opt);
+		hw_nat_l3_info(entry1, opt);
+		if ((opt->pkt_type != IPV4_DSLITE) && (opt->pkt_type != IPV6_6RD))
+			hw_nat_l4_info(entry1, opt);
+
+		hw_nat_ib1_info(entry1, opt);
+		hw_nat_ib2_info(entry1, opt);
+		foe_dump_entry(hash_index, entry1);
+
+		return HWNAT_SUCCESS;
+	}
+	pr_notice("No entry idx!!!\n");
+
+	return HWNAT_FAIL;
+}
+
+int foe_add_entry(struct hwnat_tuple *opt)
+{
+	struct foe_pri_key key;
+	struct foe_entry *entry = NULL;
+	s32 hash_index;
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+	int done_bit;
+#endif
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		key.ipv4_hnapt.sip = opt->ing_sipv4;
+		key.ipv4_hnapt.dip = opt->ing_dipv4;
+		key.ipv4_hnapt.sport = opt->ing_sp;
+		key.ipv4_hnapt.dport = opt->ing_dp;
+		key.ipv4_hnapt.is_udp = opt->is_udp;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		key.ipv6_routing.sip0 = opt->ing_sipv6_0;
+		key.ipv6_routing.sip1 = opt->ing_sipv6_1;
+		key.ipv6_routing.sip2 = opt->ing_sipv6_2;
+		key.ipv6_routing.sip3 = opt->ing_sipv6_3;
+		key.ipv6_routing.dip0 = opt->ing_dipv6_0;
+		key.ipv6_routing.dip1 = opt->ing_dipv6_1;
+		key.ipv6_routing.dip2 = opt->ing_dipv6_2;
+		key.ipv6_routing.dip3 = opt->ing_dipv6_3;
+		key.ipv6_routing.sport = opt->ing_sp;
+		key.ipv6_routing.dport = opt->ing_dp;
+		key.ipv6_routing.is_udp = opt->is_udp;
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		key.ipv4_dslite.sip_v4 = opt->ing_sipv4;
+		key.ipv4_dslite.dip_v4 = opt->ing_dipv4;
+		key.ipv4_dslite.sip0_v6 = opt->ing_sipv6_0;
+		key.ipv4_dslite.sip1_v6 = opt->ing_sipv6_1;
+		key.ipv4_dslite.sip2_v6 = opt->ing_sipv6_2;
+		key.ipv4_dslite.sip3_v6 = opt->ing_sipv6_3;
+		key.ipv4_dslite.dip0_v6 = opt->ing_dipv6_0;
+		key.ipv4_dslite.dip1_v6 = opt->ing_dipv6_1;
+		key.ipv4_dslite.dip2_v6 = opt->ing_dipv6_2;
+		key.ipv4_dslite.dip3_v6 = opt->ing_dipv6_3;
+		key.ipv4_dslite.sport = opt->ing_sp;
+		key.ipv4_dslite.dport = opt->ing_dp;
+		key.ipv4_dslite.is_udp = opt->is_udp;
+	}
+
+	key.pkt_type = opt->pkt_type;
+#if 0
+	if (fe_feature & MANUAL_MODE)
+		hash_index = get_ppe_entry_idx(&key, entry, 0);
+	else
+		hash_index = get_ppe_entry_idx(&key, entry, 1);
+#endif
+
+	hash_index = opt->hash_index;
+	pr_notice("opt->hash_index = %d\n", opt->hash_index);
+	if (hash_index != -1) {
+		entry = &ppe_foe_base[hash_index];
+		hw_nat_l2_info(entry, opt);
+		hw_nat_l3_info(entry, opt);
+		hw_nat_ib1_info(entry, opt);
+		hw_nat_ib2_info(entry, opt);
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+		done_bit = get_entry_done_bit(entry);
+		if (done_bit == 1)
+			pr_notice("mtk_entry_add number =%d\n", hash_index);
+		else if (done_bit == 0)
+			pr_notice("ppe table not ready\n");
+		else
+			pr_notice("%s: done_bit something wrong\n", __func__);
+
+		if (done_bit != 1)
+			return HWNAT_FAIL;
+		hw_nat_semi_bind(entry, opt);
+#endif
+		foe_dump_entry(hash_index, entry);
+		return HWNAT_SUCCESS;
+	}
+
+	return HWNAT_FAIL;
+}
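+
+/* A minimal caller sketch for foe_add_entry() (all values hypothetical; the
+ * tuple layout follows the fields consumed above):
+ *
+ *	struct hwnat_tuple t = {0};
+ *
+ *	t.pkt_type   = IPV4_NAPT;
+ *	t.hash_index = 100;		// pre-resolved slot in ppe_foe_base[]
+ *	t.ing_sipv4  = 0xc0a80101;	// 192.168.1.1
+ *	t.ing_dipv4  = 0x08080808;	// 8.8.8.8
+ *	t.ing_sp     = 1024;
+ *	t.ing_dp     = 80;
+ *	t.is_udp     = 0;
+ *	if (foe_add_entry(&t) != HWNAT_SUCCESS)
+ *		pr_notice("foe_add_entry failed\n");
+ */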
+
+int foe_del_entry(struct hwnat_tuple *opt)
+{
+	struct foe_pri_key key;
+	s32 hash_index;
+	struct foe_entry *entry = NULL;
+	s32 rply_idx;
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+	int done_bit;
+#endif
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		key.ipv4_hnapt.sip = opt->ing_sipv4;
+		key.ipv4_hnapt.dip = opt->ing_dipv4;
+		key.ipv4_hnapt.sport = opt->ing_sp;
+		key.ipv4_hnapt.dport = opt->ing_dp;
+		/* key.ipv4_hnapt.is_udp=opt->is_udp; */
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		key.ipv6_routing.sip0 = opt->ing_sipv6_0;
+		key.ipv6_routing.sip1 = opt->ing_sipv6_1;
+		key.ipv6_routing.sip2 = opt->ing_sipv6_2;
+		key.ipv6_routing.sip3 = opt->ing_sipv6_3;
+		key.ipv6_routing.dip0 = opt->ing_dipv6_0;
+		key.ipv6_routing.dip1 = opt->ing_dipv6_1;
+		key.ipv6_routing.dip2 = opt->ing_dipv6_2;
+		key.ipv6_routing.dip3 = opt->ing_dipv6_3;
+		key.ipv6_routing.sport = opt->ing_sp;
+		key.ipv6_routing.dport = opt->ing_dp;
+		/* key.ipv6_routing.is_udp=opt->is_udp; */
+	}
+
+	key.pkt_type = opt->pkt_type;
+
+	/* find bind entry */
+	/* hash_index = FoeHashFun(&key,BIND); */
+	hash_index = get_ppe_entry_idx(&key, entry, 1);
+	if (hash_index != -1) {
+		opt->hash_index = hash_index;
+		rply_idx = reply_entry_idx(opt, hash_index);
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+		entry = &ppe_foe_base[hash_index];
+		done_bit = get_entry_done_bit(entry);
+		if (done_bit == 1) {
+			set_done_bit_zero(entry);
+		} else if (done_bit == 0) {
+			pr_notice("%s : ppe table not ready\n", __func__);
+		} else {
+			pr_notice("%s: unexpected done_bit value\n", __func__);
+			set_done_bit_zero(entry);
+		}
+		if (done_bit != 1)
+			return HWNAT_FAIL;
+#endif
+		foe_del_entry_by_num(hash_index);
+		pr_notice("Clear Entry index = %d\n", hash_index);
+		if (rply_idx != -1) {
+			pr_notice("Clear Entry index = %d\n", rply_idx);
+			foe_del_entry_by_num(rply_idx);
+		}
+
+		return HWNAT_SUCCESS;
+	}
+	pr_notice("HWNAT ENTRY NOT FOUND\n");
+	return HWNAT_ENTRY_NOT_FOUND;
+}
+EXPORT_SYMBOL(foe_del_entry);
+
+int get_five_tule(struct sk_buff *skb)
+{
+	struct ethhdr *eth = NULL;
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct tcphdr *th = NULL;
+	struct udphdr *uh = NULL;
+	u8 ipv6_head_len = 0;
+
+	memset(&ppe_parse_rx_result, 0, sizeof(ppe_parse_rx_result));
+	eth = (struct ethhdr *)skb->data;
+	ppe_parse_rx_result.eth_type = eth->h_proto;
+	/* set layer4 start addr */
+	if ((ppe_parse_rx_result.eth_type == htons(ETH_P_IP)) ||
+	    (ppe_parse_rx_result.eth_type == htons(ETH_P_PPP_SES) &&
+	    (ppe_parse_rx_result.ppp_tag == htons(PPP_IP)))) {
+		iph = (struct iphdr *)(skb->data + ETH_HLEN);
+		memcpy(&ppe_parse_rx_result.iph, iph, sizeof(struct iphdr));
+		if (iph->protocol == IPPROTO_TCP) {
+			skb_set_transport_header(skb, ETH_HLEN + (iph->ihl * 4));
+			th = (struct tcphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.th, th, sizeof(struct tcphdr));
+			ppe_parse_rx_result.pkt_type = IPV4_HNAPT;
+			if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+				if (debug_level >= 2)
+					DD;
+				return 1;
+			}
+		} else if (iph->protocol == IPPROTO_UDP) {
+			skb_set_transport_header(skb, ETH_HLEN + (iph->ihl * 4));
+			uh = (struct udphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.uh, uh, sizeof(struct udphdr));
+			ppe_parse_rx_result.pkt_type = IPV4_HNAPT;
+			if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+				if (USE_3T_UDP_FRAG == 0)
+					return 1;
+			}
+		} else if (iph->protocol == IPPROTO_GRE) {
+			/* GRE is not handled by this parser */
+			if (debug_level >= 2)
+				DD;
+			return 1;
+		}
+		if (fe_feature & HNAT_IPV6) {
+			if (iph->protocol == IPPROTO_IPV6) {
+				ip6h = (struct ipv6hdr *)((uint8_t *)iph + iph->ihl * 4);
+				memcpy(&ppe_parse_rx_result.ip6h, ip6h, sizeof(struct ipv6hdr));
+				if (ip6h->nexthdr == NEXTHDR_TCP) {
+					skb_set_transport_header(skb, ETH_HLEN + (iph->ihl * 4) + sizeof(struct ipv6hdr));
+					th = (struct tcphdr *)skb_transport_header(skb);
+					memcpy(&ppe_parse_rx_result.th.source, &th->source, sizeof(th->source));
+					memcpy(&ppe_parse_rx_result.th.dest, &th->dest, sizeof(th->dest));
+				} else if (ip6h->nexthdr == NEXTHDR_UDP) {
+					skb_set_transport_header(skb, ETH_HLEN + (iph->ihl * 4) + sizeof(struct ipv6hdr));
+					uh = (struct udphdr *)skb_transport_header(skb);
+					memcpy(&ppe_parse_rx_result.uh.source, &uh->source, sizeof(uh->source));
+					memcpy(&ppe_parse_rx_result.uh.dest, &uh->dest, sizeof(uh->dest));
+				}
+				/* the identification field in the outer IPv4
+				 * header is zero after entering binding state;
+				 * some 6rd relay routers will drop such packets
+				 */
+				ppe_parse_rx_result.pkt_type = IPV6_6RD;
+			}
+		}
+		/* any other packet format is not supported */
+		if ((iph->protocol != IPPROTO_TCP) && (iph->protocol != IPPROTO_UDP) &&
+		    (iph->protocol != IPPROTO_GRE) && (iph->protocol != IPPROTO_IPV6))
+			return 1;
+
+	} else if (ppe_parse_rx_result.eth_type == htons(ETH_P_IPV6) ||
+		   (ppe_parse_rx_result.eth_type == htons(ETH_P_PPP_SES) &&
+		    ppe_parse_rx_result.ppp_tag == htons(PPP_IPV6))) {
+		ip6h = (struct ipv6hdr *)skb_network_header(skb);
+		memcpy(&ppe_parse_rx_result.ip6h, ip6h, sizeof(struct ipv6hdr));
+		if (ip6h->nexthdr == NEXTHDR_TCP) {
+			skb_set_transport_header(skb, ETH_HLEN + (sizeof(struct ipv6hdr)));
+			th = (struct tcphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.th, th, sizeof(struct tcphdr));
+			ppe_parse_rx_result.pkt_type = IPV6_5T_ROUTE;
+		} else if (ip6h->nexthdr == NEXTHDR_UDP) {
+			skb_set_transport_header(skb, ETH_HLEN + (sizeof(struct ipv6hdr)));
+			uh = (struct udphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.uh, uh, sizeof(struct udphdr));
+			ppe_parse_rx_result.pkt_type = IPV6_5T_ROUTE;
+		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			/* the inner IPv4 header directly follows the IPv6 header */
+			ipv6_head_len = sizeof(struct ipv6hdr);
+			memcpy(&ppe_parse_rx_result.iph, (uint8_t *)ip6h + ipv6_head_len,
+			       sizeof(struct iphdr));
+			ppe_parse_rx_result.pkt_type = IPV4_DSLITE;
+		} else {
+			ppe_parse_rx_result.pkt_type = IPV6_3T_ROUTE;
+		}
+
+	} else {
+		if (debug_level >= 2)
+			DD;
+		return 1;
+	}
+	return 0;
+}
+
+int decide_qid(u16 hash_index, struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+	u32 saddr;
+	u32 daddr;
+
+	u32 ppe_saddr;
+	u32 ppe_daddr;
+	u32 ppe_sport;
+	u32 ppe_dport;
+
+	u32 sport = 0;
+	u32 dport = 0;
+
+	u32 ipv6_sip_127_96;
+	u32 ipv6_sip_95_64;
+	u32 ipv6_sip_63_32;
+	u32 ipv6_sip_31_0;
+
+	u32 ipv6_dip_127_96;
+	u32 ipv6_dip_95_64;
+	u32 ipv6_dip_63_32;
+	u32 ipv6_dip_31_0;
+
+	u32 ppe_saddr_127_96;
+	u32 ppe_saddr_95_64;
+	u32 ppe_saddr_63_32;
+	u32 ppe_saddr_31_0;
+
+	u32 ppe_daddr_127_96;
+	u32 ppe_daddr_95_64;
+	u32 ppe_daddr_63_32;
+	u32 ppe_daddr_31_0;
+
+	u32 ppe_sportv6;
+	u32 ppe_dportv6;
+
+	entry = &ppe_foe_base[hash_index];
+	if (IS_IPV4_HNAPT(entry)) {
+		saddr = ntohl(ppe_parse_rx_result.iph.saddr);
+		daddr = ntohl(ppe_parse_rx_result.iph.daddr);
+		if (ppe_parse_rx_result.iph.protocol == IPPROTO_TCP) {
+			sport = ntohs(ppe_parse_rx_result.th.source);
+			dport = ntohs(ppe_parse_rx_result.th.dest);
+		} else if (ppe_parse_rx_result.iph.protocol == IPPROTO_UDP) {
+			sport = ntohs(ppe_parse_rx_result.uh.source);
+			dport = ntohs(ppe_parse_rx_result.uh.dest);
+		}
+		ppe_saddr = entry->ipv4_hnapt.sip;
+		ppe_daddr = entry->ipv4_hnapt.dip;
+		ppe_sport = entry->ipv4_hnapt.sport;
+		ppe_dport = entry->ipv4_hnapt.dport;
+		if (debug_level >= 2) {
+			pr_notice("ppe_saddr = %x, ppe_daddr=%x, ppe_sport=%d, ppe_dport=%d, saddr=%x, daddr=%x, sport= %d, dport=%d\n",
+				ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport);
+		}
+		if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+		    (sport == ppe_sport) && (dport == ppe_dport) &&
+		    (entry->bfib1.state == BIND)) {
+			if (entry->ipv4_hnapt.iblk2.dp == 2)
+				skb->dev = dst_port[DP_GMAC2];
+			else
+				skb->dev = dst_port[DP_GMAC1];
+			if (debug_level >= 2)
+				pr_notice("qid = %d\n", entry->ipv4_hnapt.iblk2.qid);
+			skb->mark = entry->ipv4_hnapt.iblk2.qid;
+			return 0;
+		} else {
+			return -1;
+		}
+	}
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_5T_ROUTE(entry)) {
+			ipv6_sip_127_96 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[0]);
+			ipv6_sip_95_64 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[1]);
+			ipv6_sip_63_32 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[2]);
+			ipv6_sip_31_0 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[3]);
+
+			ipv6_dip_127_96 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[0]);
+			ipv6_dip_95_64 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[1]);
+			ipv6_dip_63_32 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[2]);
+			ipv6_dip_31_0 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[3]);
+
+			ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+			ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+			ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+			ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+			ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+			ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+			ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+			ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+			ppe_sportv6 = entry->ipv6_5t_route.sport;
+			ppe_dportv6 = entry->ipv6_5t_route.dport;
+			/* the parsed L4 protocol for IPv6 lives in ip6h.nexthdr */
+			if (ppe_parse_rx_result.ip6h.nexthdr == NEXTHDR_TCP) {
+				sport = ntohs(ppe_parse_rx_result.th.source);
+				dport = ntohs(ppe_parse_rx_result.th.dest);
+			} else if (ppe_parse_rx_result.ip6h.nexthdr == NEXTHDR_UDP) {
+				sport = ntohs(ppe_parse_rx_result.uh.source);
+				dport = ntohs(ppe_parse_rx_result.uh.dest);
+			}
+			if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+			    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+			    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+			    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+			    (sport == ppe_sportv6) && (dport == ppe_dportv6) &&
+			    (entry->bfib1.state == BIND)) {
+				if (entry->ipv6_5t_route.iblk2.dp == 2)
+					skb->dev = dst_port[DP_GMAC2];
+				else
+					skb->dev = dst_port[DP_GMAC1];
+				skb->mark = entry->ipv6_5t_route.iblk2.qid;
+			} else {
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+void set_qid(struct sk_buff *skb)
+{
+	struct foe_pri_key key;
+	s32 hash_index;
+	struct foe_entry *entry = NULL;
+
+	get_five_tule(skb);
+	if (ppe_parse_rx_result.pkt_type == IPV4_HNAPT) {
+		key.ipv4_hnapt.sip = ntohl(ppe_parse_rx_result.iph.saddr);
+		key.ipv4_hnapt.dip = ntohl(ppe_parse_rx_result.iph.daddr);
+
+		if (ppe_parse_rx_result.iph.protocol == IPPROTO_TCP) {
+			key.ipv4_hnapt.sport = ntohs(ppe_parse_rx_result.th.source);
+			key.ipv4_hnapt.dport = ntohs(ppe_parse_rx_result.th.dest);
+		} else if (ppe_parse_rx_result.iph.protocol == IPPROTO_UDP) {
+			key.ipv4_hnapt.sport = ntohs(ppe_parse_rx_result.uh.source);
+			key.ipv4_hnapt.dport = ntohs(ppe_parse_rx_result.uh.dest);
+		}
+		/* key.ipv4_hnapt.is_udp=opt->is_udp; */
+	} else if (ppe_parse_rx_result.pkt_type == IPV6_5T_ROUTE) {
+		key.ipv6_routing.sip0 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[0]);
+		key.ipv6_routing.sip1 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[1]);
+		key.ipv6_routing.sip2 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[2]);
+		key.ipv6_routing.sip3 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[3]);
+		key.ipv6_routing.dip0 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[0]);
+		key.ipv6_routing.dip1 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[1]);
+		key.ipv6_routing.dip2 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[2]);
+		key.ipv6_routing.dip3 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[3]);
+		if (ppe_parse_rx_result.ip6h.nexthdr == IPPROTO_TCP) {
+			key.ipv6_routing.sport = ntohs(ppe_parse_rx_result.th.source);
+			key.ipv6_routing.dport = ntohs(ppe_parse_rx_result.th.dest);
+		} else if (ppe_parse_rx_result.ip6h.nexthdr == IPPROTO_UDP) {
+			key.ipv6_routing.sport = ntohs(ppe_parse_rx_result.uh.source);
+			key.ipv6_routing.dport = ntohs(ppe_parse_rx_result.uh.dest);
+		}
+	}
+
+	key.pkt_type = ppe_parse_rx_result.pkt_type;
+
+	/* find bind entry */
+	/* hash_index = FoeHashFun(&key,BIND); */
+	hash_index = get_ppe_entry_idx(&key, entry, 1);
+	if (hash_index != -1)
+		decide_qid(hash_index, skb);
+	if (debug_level >= 6)
+		pr_notice("hash_index = %d\n", hash_index);
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb.h b/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb.h
new file mode 100755
index 0000000..bec354d
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb.h
@@ -0,0 +1,998 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef _FOE_FDB_WANTED
+#define _FOE_FDB_WANTED
+
+#include <net/ip.h>
+#include "hnat_ioctl.h"
+/* #include "frame_engine.h" */
+
+extern struct foe_entry *ppe_foe_base;
+/* DEFINITIONS AND MACROS*/
+#define FOE_ENTRY_LIFE_TIME	5
+#define FOE_THRESHOLD		1000
+#define FOE_HASH_MASK		0x00001FFF
+#define FOE_HASH_WAY		2
+#define FOE_1K_SIZ_MASK		0x000001FF
+#define FOE_2K_SIZ_MASK		0x000003FF
+#define FOE_4K_SIZ_MASK		0x000007FF
+#define FOE_8K_SIZ_MASK		0x00000FFF
+#define FOE_16K_SIZ_MASK	0x00001FFF
+
+#if defined(CONFIG_RA_HW_NAT_TBL_1K)
+#define FOE_4TB_SIZ		1024
+#define FOE_4TB_BIT		10
+#elif defined(CONFIG_RA_HW_NAT_TBL_2K)
+#define FOE_4TB_SIZ		2048
+#define FOE_4TB_BIT             11
+#elif defined(CONFIG_RA_HW_NAT_TBL_4K)
+#define FOE_4TB_SIZ		4096
+#define FOE_4TB_BIT             12
+#elif defined(CONFIG_RA_HW_NAT_TBL_8K)
+#define FOE_4TB_SIZ		8192
+#define FOE_4TB_BIT             13
+#elif defined(CONFIG_RA_HW_NAT_TBL_16K)
+#define FOE_4TB_SIZ		16384
+#define FOE_4TB_BIT             14
+#elif defined(CONFIG_RA_HW_NAT_TBL_32K)
+#define FOE_4TB_SIZ		32768
+#define FOE_4TB_BIT             15
+#endif
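+
+/* Worked example: with CONFIG_RA_HW_NAT_TBL_32K (the setting in this
+ * driver's Makefile), FOE_4TB_SIZ = 32768 = 2^15, so FOE_4TB_BIT = 15.
+ * The table is organized as FOE_HASH_WAY = 2 ways per bucket: the lookup
+ * helpers always compute an even base index, and base + 1 is the second way.
+ */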
+
+#define FOE_ENTRY_SIZ		128	/* for IPv6 backward compatibility */
+
+#define IP_FORMAT3(addr) (((unsigned char *)&addr)[3])
+#define IP_FORMAT2(addr) (((unsigned char *)&addr)[2])
+#define IP_FORMAT1(addr) (((unsigned char *)&addr)[1])
+#define IP_FORMAT0(addr) (((unsigned char *)&addr)[0])
+
+struct pkt_parse_result {
+	/*layer2 header */
+	u8 dmac[6];
+	u8 smac[6];
+
+	/*vlan header */
+	u16 vlan_tag;
+	u16 vlan1_gap;
+	u16 vlan1;
+	u16 vlan2_gap;
+	u16 vlan2;
+	u16 vlan_layer;
+
+	/*pppoe header */
+	u32 pppoe_gap;
+	u16 ppp_tag;
+	u16 pppoe_sid;
+
+	/*layer3 header */
+	u16 eth_type;
+	struct iphdr iph;
+	struct ipv6hdr ip6h;
+
+	/*layer4 header */
+	struct tcphdr th;
+	struct udphdr uh;
+
+	u32 pkt_type;
+	u8 is_mcast;
+
+};
+
+struct pkt_rx_parse_result {
+	/*layer2 header */
+	u8 dmac[6];
+	u8 smac[6];
+
+	/*vlan header */
+	u16 vlan_tag;
+	u16 vlan1_gap;
+	u16 vlan1;
+	u16 vlan2_gap;
+	u16 vlan2;
+	u16 vlan_layer;
+
+	/*pppoe header */
+	u32 pppoe_gap;
+	u16 ppp_tag;
+	u16 pppoe_sid;
+
+	/*layer3 header */
+	u16 eth_type;
+	struct iphdr iph;
+	struct ipv6hdr ip6h;
+
+	/*layer4 header */
+	struct tcphdr th;
+	struct udphdr uh;
+
+	u32 pkt_type;
+	u8 is_mcast;
+
+};
+
+/* TYPEDEFS AND STRUCTURES */
+enum FOE_TBL_SIZE {
+	FOE_TBL_SIZE_1K,
+	FOE_TBL_SIZE_2K,
+	FOE_TBL_SIZE_4K,
+	FOE_TBL_SIZE_8K,
+	FOE_TBL_SIZE_16K,
+	FOE_TBL_SIZE_32K
+};
+
+enum VLAN_ACTION {
+	NO_ACT = 0,
+	MODIFY = 1,
+	INSERT = 2,
+	DELETE = 3
+};
+
+enum FOE_ENTRY_STATE {
+	INVALID = 0,
+	UNBIND = 1,
+	BIND = 2,
+	FIN = 3
+};
+
+enum FOE_TBL_TCP_UDP {
+	TCP = 0,
+	UDP = 1,
+	ANY = 2
+};
+
+enum FOE_TBL_EE {
+	NOT_ENTRY_END = 0,
+	ENTRY_END_FP = 1,
+	ENTRY_END_FOE = 2
+};
+
+enum FOE_LINK_TYPE {
+	LINK_TO_FOE = 0,
+	LINK_TO_FP = 1
+};
+
+enum FOE_IP_ACT {
+	IPV4_HNAPT = 0,
+	IPV4_HNAT = 1,
+	IPV6_1T_ROUTE = 2,
+	IPV4_DSLITE = 3,
+	IPV6_3T_ROUTE = 4,
+	IPV6_5T_ROUTE = 5,
+	IPV6_6RD = 7,
+	IPV4_MAP_T = 8,
+#if defined(CONFIG_HNAT_V2)
+	IPV4_MAP_E = 9,
+#endif
+#if defined(CONFIG_HNAT_V1)
+	IPV4_MAP_E = 3,
+#endif
+};
+
+enum FOE_ENTRY_FMT {
+	IPV4_NAPT = 0,
+	IPV4_NAT = 1,
+	IPV6_ROUTING = 5
+};
+
+#define IS_IPV4_HNAPT(x)	(((x)->bfib1.pkt_type == IPV4_HNAPT) ? 1 : 0)
+#define IS_IPV4_HNAT(x)		(((x)->bfib1.pkt_type == IPV4_HNAT) ? 1 : 0)
+#define IS_IPV6_1T_ROUTE(x)	(((x)->bfib1.pkt_type == IPV6_1T_ROUTE) ? 1 : 0)
+#define IS_IPV4_DSLITE(x)	(((x)->bfib1.pkt_type == IPV4_DSLITE) ? 1 : 0)
+#define IS_IPV4_MAPE(x)		(((x)->bfib1.pkt_type == IPV4_MAP_E) ? 1 : 0)
+#define IS_IPV4_MAPT(x)		(((x)->bfib1.pkt_type == IPV4_MAP_T) ? 1 : 0)
+#define IS_IPV6_3T_ROUTE(x)	(((x)->bfib1.pkt_type == IPV6_3T_ROUTE) ? 1 : 0)
+#define IS_IPV6_5T_ROUTE(x)	(((x)->bfib1.pkt_type == IPV6_5T_ROUTE) ? 1 : 0)
+#define IS_IPV6_6RD(x)		(((x)->bfib1.pkt_type == IPV6_6RD) ? 1 : 0)
+#define IS_IPV4_GRP(x)		(IS_IPV4_HNAPT(x) | IS_IPV4_HNAT(x))
+#define IS_IPV6_GRP(x) \
+	(IS_IPV6_1T_ROUTE(x) | IS_IPV6_3T_ROUTE(x) | IS_IPV6_5T_ROUTE(x) | IS_IPV6_6RD(x) | \
+	 IS_IPV4_DSLITE(x) | IS_IPV4_MAPE(x))
+/************************************************************/
+
+struct MED_HOST_INFO1_T {
+	unsigned int PPE_ENTRY:15;
+	unsigned int HOST:1;
+	unsigned int CRSN:5;
+	unsigned int LAST:1;
+	unsigned int PIT_CNT:5;
+	unsigned int PIT_IDX_L:5;
+};
+
+struct MED_HOST_INFO2_T {
+	unsigned int HOST_CNT:6;
+	unsigned int RSV:10;
+	unsigned int BID:16;
+};
+
+/* MED_HNAT_INFO struct */
+struct MED_HNAT_INFO_HOST {
+	struct MED_HOST_INFO1_T dmad_info1;
+	struct MED_HOST_INFO2_T dmad_info2;
+};
+
+
+struct MDMA_RXDMAD_INFO1 {
+	unsigned int PDP0;
+};
+
+struct MDMA_RXDMAD_INFO2 {
+	unsigned int RSV0:6;
+	unsigned int RSV1:2;
+	unsigned int PLEN0:16;
+	unsigned int RSV2:6;
+	unsigned int LS0:1;
+	unsigned int DDONE_bit:1;
+};
+
+struct MDMA_RXDMAD_INFO3 {
+	unsigned int TAG:1;
+	unsigned int L4F:1;
+	unsigned int L4VLD:1;
+	unsigned int TACK:1;
+	unsigned int IP4F:1;
+	unsigned int IP4:1;
+	unsigned int IP6:1;
+	unsigned int RSV0:5;
+	unsigned int RSV1:20;
+};
+
+struct MDMA_RXDMAD_INFO4 {
+	unsigned int VID:16;
+	unsigned int VPID:16;
+};
+
+struct MDMA_RXDMAD_INFO5 {
+	unsigned int FOE_ENTRY:15;
+	unsigned int RSV0:3;
+	unsigned int CRSN:5;
+	unsigned int RSV1:3;
+	unsigned int SP:4;
+	unsigned int RSV2:2;
+};
+
+struct MDMA_RXDMAD_INFO6 {
+	unsigned int LRO_FLUSH_RSN:3;
+	unsigned int RSV0:2;
+	unsigned int RSV1:11;
+	unsigned int LRO_AGG_CNT:8;
+	unsigned int RSV2:2;
+	unsigned int RSV3:6;
+};
+
+struct MDMA_RXDMAD_INFO7 {
+	unsigned int RSV;
+};
+
+struct MDMA_RXDMAD_INFO8 {
+	unsigned int RSV;
+};
+
+struct MDMA_rxdmad {
+	struct MDMA_RXDMAD_INFO1 rxd_info1;
+	struct MDMA_RXDMAD_INFO2 rxd_info2;
+	struct MDMA_RXDMAD_INFO3 rxd_info3;
+	struct MDMA_RXDMAD_INFO4 rxd_info4;
+	struct MDMA_RXDMAD_INFO5 rxd_info5;
+	struct MDMA_RXDMAD_INFO6 rxd_info6;
+	struct MDMA_RXDMAD_INFO7 rxd_info7;
+	struct MDMA_RXDMAD_INFO8 rxd_info8;
+};
+
+/*=========================================
+ *    MDMA TX Descriptor Format define
+ *=========================================
+ */
+
+struct MDMA_TXDMAD_INFO1 {
+	unsigned int SDP0;
+};
+
+struct MDMA_TXDMAD_INFO2 {
+	unsigned int RSV0:6;
+	unsigned int RSV1:2;
+	unsigned int SDL0:16;
+	unsigned int RSV2:6;
+	unsigned int LS0:1;
+	unsigned int DDONE:1;
+};
+
+struct MDMA_TXDMAD_INFO3 {
+	unsigned int SDP1;
+};
+
+struct MDMA_TXDMAD_INFO4 {
+	unsigned int RSV0:6;
+	unsigned int RSV1:2;
+	unsigned int SDL1:16;
+	unsigned int RSV2:6;
+	unsigned int LS1:1;
+	unsigned int BURST:1;
+};
+
+struct MDMA_TXDMAD_INFO5 {
+	unsigned int RSV0:16;
+	unsigned int FPORT:4;
+	unsigned int RSV1:2;
+	unsigned int RSV2:6;
+	unsigned int TUI_CO:3;
+	unsigned int TSO:1;
+};
+
+struct MDMA_TXDMAD_INFO6 {
+	unsigned int PID_IDX_LSB:5;
+	unsigned int RSV0:3;
+	unsigned int PID_CNT:5;
+	unsigned int RSV1:3;
+	unsigned int TO_AP:1;
+	unsigned int RSV2:15;
+};
+
+struct MDMA_TXDMAD_INFO7 {
+	unsigned int FRAG_PID:16;
+	unsigned int PID:16;
+};
+
+struct MDMA_TXDMAD_INFO8 {
+	unsigned int RSV;
+};
+
+struct MDMA_txdmad {
+	struct MDMA_TXDMAD_INFO1 txd_info1;
+	struct MDMA_TXDMAD_INFO2 txd_info2;
+	struct MDMA_TXDMAD_INFO3 txd_info3;
+	struct MDMA_TXDMAD_INFO4 txd_info4;
+	struct MDMA_TXDMAD_INFO5 txd_info5;
+	struct MDMA_TXDMAD_INFO6 txd_info6;
+	struct MDMA_TXDMAD_INFO7 txd_info7;
+	struct MDMA_TXDMAD_INFO8 txd_info8;
+};
+
+#if defined(CONFIG_HNAT_V2)
+struct ud_info_blk1 {
+	uint32_t time_stamp:8;
+	uint32_t sp:4;
+	uint32_t pcnt:8;
+	uint32_t ilgf:1;
+	uint32_t mc:1;
+	uint32_t preb:1;
+	uint32_t pkt_type:5;
+	uint32_t state:2;
+	uint32_t udp:1;
+	uint32_t sta:1;		/* static entry */
+};
+
+/* state = bind & fin */
+struct bf_info_blk1 {
+	uint32_t time_stamp:8;
+	uint32_t sp:4;
+	uint32_t mc:1;
+	uint32_t ka:1;/* keep alive */
+	uint32_t vlan_layer:3;
+	uint32_t psn:1;/* egress packet has PPPoE session */
+	uint32_t vpm:1;/* 0:ethertype remark, 1:0x8100(CR default) */
+	uint32_t ps:1;/* packet sampling */
+	uint32_t cah:1;/* cacheable flag */
+	uint32_t rmt:1;/* remove tunnel ip header (6rd/dslite only) */
+	uint32_t ttl:1;
+	uint32_t pkt_type:5;
+	uint32_t state:2;
+	uint32_t udp:1;
+	uint32_t sta:1;		/* static entry */
+};
+
+/* state = bind & fin */
+struct _info_blk2 {
+	uint32_t qid:7;		/* QID in Qos Port */
+	uint32_t rsv:1;
+	uint32_t fqos:1;	/* force to PSE QoS port */
+	uint32_t dp:4;		/* force to PSE port x */
+	uint32_t mcast:1;	/* multicast this packet to CPU */
+	uint32_t pcpl:1;	/* OSBN */
+	uint32_t mibf:1;
+	uint32_t alen:1;
+	uint32_t rx_id:2;
+	uint32_t winfo:1;
+	uint32_t acnt:4;
+	uint32_t dscp:8;	/* DSCP value */
+};
+#endif
+#if defined(CONFIG_HNAT_V1)
+/* state = unbind & dynamic */
+struct ud_info_blk1 {
+	uint32_t time_stamp:8;
+	uint32_t pcnt:14;
+	uint32_t ilgf:1;
+	uint32_t mc:1;
+	uint32_t preb:1;
+	uint32_t pkt_type:3;
+	uint32_t state:2;
+	uint32_t udp:1;
+	uint32_t sta:1;		/* static entry */
+};
+
+/* state = bind & fin */
+struct bf_info_blk1 {
+	uint32_t time_stamp:14;
+	uint32_t mc:1;
+	uint32_t ka:1;		/* keep alive */
+	uint32_t vlan_layer:3;
+	uint32_t psn:1;		/* egress packet has PPPoE session */
+
+	uint32_t vpm:1;		/* 0:ethertype remark, 1:0x8100(CR default) */
+	uint32_t ps:1;		/* packet sampling */
+	uint32_t cah:1;		/* cacheable flag */
+	uint32_t rmt:1;		/* remove tunnel ip header (6rd/dslite only) */
+	uint32_t ttl:1;
+	uint32_t pkt_type:3;
+	uint32_t state:2;
+	uint32_t udp:1;
+	uint32_t sta:1;		/* static entry */
+};
+
+/* state = bind & fin */
+struct _info_blk2 {
+	uint32_t qid:4;		/* QID in Qos Port */
+	uint32_t fqos:1;	/* force to PSE QoS port */
+	uint32_t dp:3;		/* force to PSE port x */
+				/*0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP */
+	uint32_t mcast:1;	/* multicast this packet to CPU */
+	uint32_t pcpl:1;	/* OSBN */
+	uint32_t mibf:1;
+	uint32_t alen:1;
+	uint32_t qid1:2;
+	uint32_t dp1:1;
+	uint32_t rx_id:2;
+	uint32_t winfo:1;
+	uint32_t acnt:6;
+	uint32_t dscp:8;	/* DSCP value */
+};
+#endif
+/* Foe Entry (64B) */
+/*      IPV4:			     IPV6: */
+/*	+-----------------------+    +-----------------------+ */
+/*	|  Information Block 1  |    |  Information Block 1  | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|	SIP(4B)		|    |     IPv6_DIP0(4B)     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|	DIP(4B)		|    |     IPv6_DIP1(4B)     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| SPORT(2B) | DPORT(2B) |    |	      Rev(4B)        | */
+/*	+-----------+-----------+    +-----------------------+ */
+/*	| Information Block 2   |    |	Information Block 2  | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|      New SIP(4B)	|    |     IPv6_DIP2(4B)     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|      New DIP(4B)	|    |     IPv6_DIP3(4B)     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| New SPORT | New DPORT |    |	       Rev(4B)       | */
+/*	+-----------+-----------+    +-----------------------+ */
+/*	| VLAN1(2B) |DMAC[47:32]|    | VLAN1(2B) |DMAC[47:32]| */
+/*	+-----------|-----------+    +-----------|-----------+ */
+/*	|	DMAC[31:0]      |    |       DMAC[31:0]      | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| PPPoE_ID  |SMAC[47:32]|    | PPPoE_ID  |SMAC[47:32]| */
+/*	+-----------+-----------+    +-----------+-----------+ */
+/*	|       SMAC[31:0]      |    |       SMAC[31:0]      | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| Rev |  SNAP_Ctrl(3B)  |    | Rev |  SNAP_Ctrl(3B)  | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|    Rev    | VLAN2(2B) |    |   Rev     | VLAN2(2B) | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|     Rev(4B)           |    |       Rev(4B)         | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|     tmp_buf(4B)       |    |       tmp_buf(4B)     | */
+/*	+-----------------------+    +-----------------------+ */
+/* Foe Entry (80B) */
+/* */
+/*      IPV4 HNAPT:			     IPV4: */
+/*	+-----------------------+    +-----------------------+ */
+/*	|  Information Block 1  |    |  Information Block 1  | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|	SIP(4B)		|    |		SIP(4B)      | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|	DIP(4B)		|    |		DIP(4B)      | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| SPORT(2B) | DPORT(2B) |    |	      Rev(4B)        | */
+/*	+-----------+-----------+    +-----------------------+ */
+/*	| EG DSCP| Info Block 2 |    |	Information Block 2  | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|      New SIP(4B)	|    |     New SIP (4B)      | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|      New DIP(4B)	|    |     New DIP (4B)      | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| New SPORT | New DPORT |    | New SPORT | New DPORT | */
+/*	+-----------+-----------+    +-----------------------+ */
+/*	|          REV          |    |		REV	     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|Act_dp|   REV          |    |Act_dp|	REV	     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|      tmp_buf(4B)      |    |	     temp_buf(4B)    | */
+/*	+-----------------------+    +-----------|-----------+ */
+/*	| ETYPE     | VLAN1 ID  |    | ETYPE     |  VLAN1    | */
+/*	+-----------+-----------+    +-----------+-----------+ */
+/*	|       DMAC[47:16]     |    |       DMAC[47:16]     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| DMAC[15:0]| VLAN2 ID  |    | DMAC[15:0]|  VLAN2    | */
+/*	+-----------------------+    +-----------------------+ */
+/*	|       SMAC[47:16]     |    |       SMAC[47:16]     | */
+/*	+-----------------------+    +-----------------------+ */
+/*	| SMAC[15:0]| PPPOE ID  |    | SMAC[15:0]| PPPOE ID  | */
+/*	+-----------------------+    +-----------------------+ */
+/*								 */
+struct _ipv4_hnapt {
+	union {
+		struct ud_info_blk1 udib1;
+		struct bf_info_blk1 bfib1;
+		u32 info_blk1;
+	};
+	u32 sip;
+	u32 dip;
+	u16 dport;
+	u16 sport;
+	union {
+		struct _info_blk2 iblk2;
+		u32 info_blk2;
+	};
+	u32 new_sip;
+	u32 new_dip;
+	u16 new_dport;
+	u16 new_sport;
+	u32 resv1;
+	u32 resv2;
+	uint32_t resv3:26;
+	uint32_t act_dp:6;	/* UDF */
+	u16 vlan1;
+	u16 etype;
+	u8 dmac_hi[4];
+	u16 vlan2_winfo;
+	u8 dmac_lo[2];
+	u8 smac_hi[4];
+	u16 pppoe_id;
+	u8 smac_lo[2];
+	u16 minfo;
+	u16 winfo;
+	u32 tbd0;
+	u32 tbd1;
+	u32 tbd2;
+	u32 tbd3;
+	u32 tbd4;
+	u32 tbd5;
+	u16 rxif_idx;
+	u16 tbd6;
+};
+
+struct _ipv4_dslite {
+	union {
+		struct ud_info_blk1 udib1;
+		struct bf_info_blk1 bfib1;
+		u32 info_blk1;
+	};
+	u32 sip;
+	u32 dip;
+	u16 dport;
+	u16 sport;
+
+	u32 tunnel_sipv6_0;
+	u32 tunnel_sipv6_1;
+	u32 tunnel_sipv6_2;
+	u32 tunnel_sipv6_3;
+
+	u32 tunnel_dipv6_0;
+	u32 tunnel_dipv6_1;
+	u32 tunnel_dipv6_2;
+	u32 tunnel_dipv6_3;
+
+	u8 flow_lbl[3];	/* kept consistent with the Linux kernel layout (should be 20 bits) */
+	uint16_t priority:4;	/* kept consistent with the Linux kernel layout (should be 8 bits) */
+	uint16_t rxif_idx:4;
+	uint32_t hop_limit:8;
+	uint32_t resv2:18;
+	uint32_t act_dp:6;	/* UDF */
+
+	union {
+		struct _info_blk2 iblk2;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u8 dmac_hi[4];
+	u16 vlan2_winfo;
+	u8 dmac_lo[2];
+	u8 smac_hi[4];
+	u16 pppoe_id;
+	u8 smac_lo[2];
+	u16 minfo;
+	u16 winfo;
+	u32 new_sip;
+	u32 new_dip;
+	u16 new_dport;
+	u16 new_sport;
+	//u16 rxif_idx;
+};
+
+struct _ipv6_1t_route {
+	union {
+		struct ud_info_blk1 udib1;
+		struct bf_info_blk1 bfib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 resv;
+
+	union {
+		struct _info_blk2 iblk2;
+		u32 info_blk2;
+	};
+
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u32 resv1;
+
+	uint32_t act_dp:6;	/* UDF */
+	u16 vlan1;
+	u16 etype;
+	u8 dmac_hi[4];
+	u16 vlan2_winfo;
+	u8 dmac_lo[2];
+	u8 smac_hi[4];
+	u16 pppoe_id;
+	u8 smac_lo[2];
+	u16 minfo;
+	u16 winfo;
+	u32 tbd0;
+	u32 tbd1;
+	u32 tbd2;
+	u32 tbd3;
+	u32 tbd4;
+	u32 tbd5;
+	u16 rxif_idx;
+	u16 tbd6;
+};
+
+struct _ipv6_3t_route {
+	union {
+		struct ud_info_blk1 udib1;
+		struct bf_info_blk1 bfib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	uint32_t prot:8;
+	uint32_t resv:24;
+
+	u32 resv1;
+	u32 resv2;
+	u32 resv3;
+	uint32_t resv4:26;
+	uint32_t act_dp:6;	/* UDF */
+
+	union {
+		struct _info_blk2 iblk2;
+		u32 info_blk2;
+	};
+	u16 vlan1;
+	u16 etype;
+	u8 dmac_hi[4];
+	u16 vlan2_winfo;
+	u8 dmac_lo[2];
+	u8 smac_hi[4];
+	u16 pppoe_id;
+	u8 smac_lo[2];
+	u16 minfo;
+	u16 winfo;
+	u32 tbd0;
+	u32 tbd1;
+	u16 rxif_idx;
+	u16 tbd2;
+};
+
+struct _ipv6_5t_route {
+	union {
+		struct ud_info_blk1 udib1;
+		struct bf_info_blk1 bfib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u16 dport;
+	u16 sport;
+
+	u32 resv1;
+	u32 resv2;
+	u32 resv3;
+	uint32_t resv4:26;
+	uint32_t act_dp:6;	/* UDF */
+
+	union {
+		struct _info_blk2 iblk2;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u8 dmac_hi[4];
+	u16 vlan2_winfo;
+	u8 dmac_lo[2];
+	u8 smac_hi[4];
+	u16 pppoe_id;
+	u8 smac_lo[2];
+	u16 minfo;
+	u16 winfo;
+	u32 tbd0;
+	u32 tbd1;
+	u16 rxif_idx;
+	u16 tbd2;
+};
+
+struct _ipv6_6rd {
+	union {
+		struct ud_info_blk1 udib1;
+		struct bf_info_blk1 bfib1;
+		u32 info_blk1;
+	};
+	u32 ipv6_sip0;
+	u32 ipv6_sip1;
+	u32 ipv6_sip2;
+	u32 ipv6_sip3;
+	u32 ipv6_dip0;
+	u32 ipv6_dip1;
+	u32 ipv6_dip2;
+	u32 ipv6_dip3;
+	u16 dport;
+	u16 sport;
+
+	u32 tunnel_sipv4;
+	u32 tunnel_dipv4;
+	uint32_t hdr_chksum:16;
+	uint32_t dscp:8;
+	uint32_t ttl:8;
+	uint32_t flag:3;
+	uint32_t resv1:13;
+	uint32_t per_flow_6rd_id:1;
+	uint32_t rxif_idx:9;
+	uint32_t act_dp:6;	/* UDF */
+
+	union {
+		struct _info_blk2 iblk2;
+		u32 info_blk2;
+	};
+
+	u16 vlan1;
+	u16 etype;
+	u8 dmac_hi[4];
+	u16 vlan2_winfo;
+	u8 dmac_lo[2];
+	u8 smac_hi[4];
+	u16 pppoe_id;
+	u8 smac_lo[2];
+	u16 minfo;
+	u16 winfo;
+	u32 tbd0;
+	u32 tbd1;
+	u16 new_dport;
+	u16 new_sport;
+	//u16 tbd2;
+	//u16 rxif_idx;
+};
+
+struct foe_entry {
+	union {
+		struct ud_info_blk1 udib1;
+		struct bf_info_blk1 bfib1;
+		struct _ipv4_hnapt ipv4_hnapt;	/* nat & napt share same data structure */
+		struct _ipv4_dslite ipv4_dslite;
+		struct _ipv6_1t_route ipv6_1t_route;
+		struct _ipv6_3t_route ipv6_3t_route;
+		struct _ipv6_5t_route ipv6_5t_route;
+		struct _ipv6_6rd ipv6_6rd;
+	};
+};
+
+struct ps_entry {
+	u8 en;
+	u8 acl;
+	u16 pkt_len;
+	u16 pkt_cnt;
+	u8 time_period;
+	u8 resv0;
+	u32 resv1;
+	u16 hw_pkt_cnt;
+	u16 hw_time;
+
+};
+
+struct mib_entry {
+	u32 byt_cnt_l;
+	u16 byt_cnt_h;
+	u32 pkt_cnt_l;
+	u8 pkt_cnt_h;
+	u8 resv0;
+	u32 resv1;
+} __packed;
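+
+/* The byte counter is 48 bits and the packet counter 40 bits, split into
+ * low/high words above.  A reader combines them as in this sketch (variable
+ * names illustrative only):
+ *
+ *	u64 bytes = ((u64)e->byt_cnt_h << 32) | e->byt_cnt_l;
+ *	u64 pkts  = ((u64)e->pkt_cnt_h << 32) | e->pkt_cnt_l;
+ */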
+
+struct foe_pri_key {
+	/* TODO: add new primary key to support dslite, 6rd */
+	unsigned short hash_index;
+	/* Ipv4 */
+	struct {
+		u32 sip;
+		u32 dip;
+		u16 sport;
+		u16 dport;
+		uint32_t is_udp:1;
+	} ipv4_hnapt;
+
+	struct {
+		u32 sip;
+		u32 dip;
+		/* TODO */
+	} ipv4_hnat;
+
+	struct {
+		u32 sip_v4;
+		u32 dip_v4;
+		u32 sip0_v6;
+		u32 sip1_v6;
+		u32 sip2_v6;
+		u32 sip3_v6;
+		u32 dip0_v6;
+		u32 dip1_v6;
+		u32 dip2_v6;
+		u32 dip3_v6;
+		u16 sport;
+		u16 dport;
+		uint32_t is_udp:1;
+		uint32_t rmt:1;
+	} ipv4_dslite;
+
+	/* IPv6 */
+	struct {
+		u32 sip0;
+		u32 sip1;
+		u32 sip2;
+		u32 sip3;
+		u32 dip0;
+		u32 dip1;
+		u32 dip2;
+		u32 dip3;
+		u16 sport;
+		u16 dport;
+		uint32_t is_udp:1;
+	} ipv6_routing;
+
+	struct {
+		u32 sip_v4;
+		u32 dip_v4;
+		u32 sip0_v6;
+		u32 sip1_v6;
+		u32 sip2_v6;
+		u32 sip3_v6;
+		u32 dip0_v6;
+		u32 dip1_v6;
+		u32 dip2_v6;
+		u32 dip3_v6;
+		u16 sport;
+		u16 dport;
+		uint32_t is_udp:1;
+		uint32_t rmt:1;
+	} ipv6_6rd;
+
+	u32 pkt_type;	/* entry format */
+};
+
+void foe_set_mac_hi_info(u8 *dst, uint8_t *src);
+void foe_set_mac_lo_info(u8 *dst, uint8_t *src);
+void foe_dump_entry(uint32_t index, struct foe_entry *entry);
+int foe_get_all_entries(struct hwnat_args *opt);
+int foe_bind_entry(struct hwnat_args *opt);
+int foe_un_bind_entry(struct hwnat_args *opt);
+int foe_drop_entry(struct hwnat_args *opt);
+int foe_del_entry_by_num(uint32_t entry_num);
+void foe_tbl_clean(void);
+int foe_dump_cache_entry(void);
+/* EXPORT FUNCTION*/
+int32_t get_pppoe_sid(struct sk_buff *skb, uint32_t vlan_gap, uint16_t *sid, uint16_t *ppp_tag);
+int ppe_set_bind_threshold(uint32_t threshold);
+int ppe_set_max_entry_limit(u32 full, uint32_t half, uint32_t qurt);
+int ppe_set_ka_interval(u8 tcp_ka, uint8_t udp_ka);
+int ppe_set_unbind_lifetime(uint8_t lifetime);
+int ppe_set_bind_lifetime(u16 tcp_fin, uint16_t udp_life, uint16_t fin_life);
+void ppe_set_entry_bind(struct sk_buff *skb, struct foe_entry *entry);
+int32_t ppe_fill_L2_info(struct sk_buff *skb, struct foe_entry *entry,
+			 struct pkt_parse_result *ppe_parse_result);
+int32_t ppe_fill_L3_info(struct sk_buff *skb, struct foe_entry *entry,
+			 struct pkt_parse_result *ppe_parse_result);
+int32_t ppe_fill_L4_info(struct sk_buff *skb, struct foe_entry *entry,
+			 struct pkt_parse_result *ppe_parse_result);
+int32_t ppe_setforce_port_info(struct sk_buff *skb, struct foe_entry *entry, int gmac_no);
+struct net_device *ra_dev_get_by_name(const char *name);
+int32_t is8021Q(u16 eth_type, struct pkt_parse_result *ppe_parse_result);
+int32_t is_special_tag(u16 eth_type, struct pkt_parse_result *ppe_parse_result);
+int32_t is_hw_vlan_tx(struct sk_buff *skb, struct pkt_parse_result *ppe_parse_result);
+#if defined(CONFIG_SUPPORT_WLAN_OPTIMIZE)
+int32_t ppe_rx_parse_layer_info(struct sk_buff *skb);
+#endif
+void ppe_set_cache_ebl(void);
+void update_foe_ac_timer_handler(unsigned long unused);
+int foe_add_entry(struct hwnat_tuple *opt);
+int foe_del_entry(struct hwnat_tuple *opt);
+void set_qid(struct sk_buff *skb);
+void foe_clear_entry(struct neighbour *neigh);
+int foe_add_entry_dvt(struct hwnat_tuple *opt);
+int get_ppe_entry_idx(struct foe_pri_key *key, struct foe_entry *entry, int del);
+int get_mib_entry_idx(struct foe_pri_key *key, struct foe_entry *entry);
+void ppe_mib_dump_ppe0(unsigned int entry_num, unsigned long *pkt_cnt, unsigned long *byte_cnt);
+void ppe_mib_dump_ppe1(unsigned int entry_num, unsigned long *pkt_cnt, unsigned long *byte_cnt);
+int ppe_get_dev_stats_handler(struct net_device *dev, struct rtnl_link_stats64 *storage);
+void ppe_init_mib_counter(void);
+void ppe_start_mib_timer(struct sk_buff *skb, struct foe_entry *entry);
+void ppe_reset_dev_mib(struct net_device *dev);
+void set_rxif_idx(struct foe_entry *entry, u16 value);
+uint32_t get_act_dp(struct foe_entry *entry);
+uint32_t get_rxif_idx(struct foe_entry *entry);
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb_32k_dvt b/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb_32k_dvt
new file mode 100755
index 0000000..8ce2213
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/foe_fdb_32k_dvt
@@ -0,0 +1,1773 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "ra_nat.h"
+#include "frame_engine.h"
+#include "foe_fdb.h"
+#include "hnat_ioctl.h"
+#include "util.h"
+#include "hnat_config.h"
+#include "hnat_define.h"
+#include "hnat_common.h"
+
+struct pkt_rx_parse_result ppe_parse_rx_result;
+
+extern struct foe_entry *ppe_foe_base;
+
+#define DD \
+{\
+pr_info("%s %d\n", __func__, __LINE__); \
+}
+
+/* 4          2         0 */
+/* +----------+---------+ */
+/* |      DMAC[47:16]   | */
+/* +--------------------+ */
+/* |DMAC[15:0]| 2nd VID | */
+/* +----------+---------+ */
+/* 4          2         0 */
+/* +----------+---------+ */
+/* |      SMAC[47:16]   | */
+/* +--------------------+ */
+/* |SMAC[15:0]| PPPOE ID| */
+/* +----------+---------+ */
+/* Ex: */
+/* Mac=01:22:33:44:55:66 */
+/* 4          2         0 */
+/* +----------+---------+ */
+/* |     01:22:33:44    | */
+/* +--------------------+ */
+/* |  55:66   | PPPOE ID| */
+/* +----------+---------+ */
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+int hash_ipv6(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	u32 t_hvt_31, t_hvt_63, t_hvt_95, t_hvt_sd;
+	u32 t_hvt_sd_23, t_hvt_sd_31_24, t_hash_32, t_hashs_16, t_ha16k, hash_index;
+	u32 ppe_saddr_127_96, ppe_saddr_95_64, ppe_saddr_63_32, ppe_saddr_31_0;
+	u32 ppe_daddr_127_96, ppe_daddr_95_64, ppe_daddr_63_32, ppe_daddr_31_0;
+	u32 ipv6_sip_127_96, ipv6_sip_95_64, ipv6_sip_63_32, ipv6_sip_31_0;
+	u32 ipv6_dip_127_96, ipv6_dip_95_64, ipv6_dip_63_32, ipv6_dip_31_0;
+	u32 sport, dport, ppe_sportv6, ppe_dportv6;
+
+	ipv6_sip_127_96 = key->ipv6_routing.sip0;
+	ipv6_sip_95_64 = key->ipv6_routing.sip1;
+	ipv6_sip_63_32 = key->ipv6_routing.sip2;
+	ipv6_sip_31_0 = key->ipv6_routing.sip3;
+	ipv6_dip_127_96 = key->ipv6_routing.dip0;
+	ipv6_dip_95_64 = key->ipv6_routing.dip1;
+	ipv6_dip_63_32 = key->ipv6_routing.dip2;
+	ipv6_dip_31_0 = key->ipv6_routing.dip3;
+	sport = key->ipv6_routing.sport;
+	dport = key->ipv6_routing.dport;
+
+	t_hvt_31 = ipv6_sip_31_0 ^ ipv6_dip_31_0 ^ (sport << 16 | dport);
+	t_hvt_63 = ipv6_sip_63_32 ^ ipv6_dip_63_32 ^ ipv6_dip_127_96;
+	t_hvt_95 = ipv6_sip_95_64 ^ ipv6_dip_95_64 ^ ipv6_sip_127_96;
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+	ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+	ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+	ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+	ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+	ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+	ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+	ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+	ppe_sportv6 = entry->ipv6_5t_route.sport;
+	ppe_dportv6 = entry->ipv6_5t_route.dport;
+	if (del != 1) {
+		if (entry->ipv6_5t_route.bfib1.state == BIND) {
+			pr_info("IPV6 Hash collision, hash index +1\n");
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+		}
+		if (entry->ipv6_5t_route.bfib1.state == BIND) {
+			pr_info("IPV6 Hash collision can not bind\n");
+			return -1;
+		}
+	} else if (del == 1) {
+		if ((ipv6_sip_127_96 != ppe_saddr_127_96) || (ipv6_sip_95_64 != ppe_saddr_95_64) ||
+		    (ipv6_sip_63_32 != ppe_saddr_63_32) || (ipv6_sip_31_0 != ppe_saddr_31_0) ||
+		    (ipv6_dip_127_96 != ppe_daddr_127_96) || (ipv6_dip_95_64 != ppe_daddr_95_64) ||
+		    (ipv6_dip_63_32 != ppe_daddr_63_32) || (ipv6_dip_31_0 != ppe_daddr_31_0) ||
+		    (sport != ppe_sportv6) || (dport != ppe_dportv6)) {
+			/* first way missed; check the second way of the bucket */
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+			ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+			ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+			ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+			ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+			ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+			ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+			ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+			ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+			ppe_sportv6 = entry->ipv6_5t_route.sport;
+			ppe_dportv6 = entry->ipv6_5t_route.dport;
+			if ((ipv6_sip_127_96 != ppe_saddr_127_96) || (ipv6_sip_95_64 != ppe_saddr_95_64) ||
+			    (ipv6_sip_63_32 != ppe_saddr_63_32) || (ipv6_sip_31_0 != ppe_saddr_31_0) ||
+			    (ipv6_dip_127_96 != ppe_daddr_127_96) || (ipv6_dip_95_64 != ppe_daddr_95_64) ||
+			    (ipv6_dip_63_32 != ppe_daddr_63_32) || (ipv6_dip_31_0 != ppe_daddr_31_0) ||
+			    (sport != ppe_sportv6) || (dport != ppe_dportv6)) {
+				if (fe_feature & SEMI_AUTO_MODE)
+					pr_info("IPv6 entry delete: entry not found\n");
+				else if (fe_feature & MANUAL_MODE)
+					pr_info("IPv6 hash collision, hwnat entry not found\n");
+				return -1;
+			}
+		}
+	}
+	return hash_index;
+}
+
+int hash_mib_ipv6(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	u32 t_hvt_31, t_hvt_63, t_hvt_95, t_hvt_sd;
+	u32 t_hvt_sd_23, t_hvt_sd_31_24, t_hash_32, t_hashs_16, t_ha16k, hash_index;
+	u32 ppe_saddr_127_96, ppe_saddr_95_64, ppe_saddr_63_32, ppe_saddr_31_0;
+	u32 ppe_daddr_127_96, ppe_daddr_95_64, ppe_daddr_63_32, ppe_daddr_31_0;
+	u32 ipv6_sip_127_96, ipv6_sip_95_64, ipv6_sip_63_32, ipv6_sip_31_0;
+	u32 ipv6_dip_127_96, ipv6_dip_95_64, ipv6_dip_63_32, ipv6_dip_31_0;
+	u32 sport, dport, ppe_sportv6, ppe_dportv6;
+
+	ipv6_sip_127_96 = key->ipv6_routing.sip0;
+	ipv6_sip_95_64 = key->ipv6_routing.sip1;
+	ipv6_sip_63_32 = key->ipv6_routing.sip2;
+	ipv6_sip_31_0 = key->ipv6_routing.sip3;
+	ipv6_dip_127_96 = key->ipv6_routing.dip0;
+	ipv6_dip_95_64 = key->ipv6_routing.dip1;
+	ipv6_dip_63_32 = key->ipv6_routing.dip2;
+	ipv6_dip_31_0 = key->ipv6_routing.dip3;
+	sport = key->ipv6_routing.sport;
+	dport = key->ipv6_routing.dport;
+
+	t_hvt_31 = ipv6_sip_31_0 ^ ipv6_dip_31_0 ^ (sport << 16 | dport);
+	t_hvt_63 = ipv6_sip_63_32 ^ ipv6_dip_63_32 ^ ipv6_dip_127_96;
+	t_hvt_95 = ipv6_sip_95_64 ^ ipv6_dip_95_64 ^ ipv6_sip_127_96;
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+	ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+	ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+	ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+	ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+	ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+	ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+	ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+	ppe_sportv6 = entry->ipv6_5t_route.sport;
+	ppe_dportv6 = entry->ipv6_5t_route.dport;
+
+	if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+	    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+	    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+	    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+	    (sport == ppe_sportv6) && (dport == ppe_dportv6)) {
+		if (debug_level >= 1)
+			pr_info("mib: ipv6 entry found entry idx = %d\n", hash_index);
+	} else {
+		hash_index = hash_index + 1;
+		entry = &ppe_foe_base[hash_index];
+		ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+		ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+		ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+		ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+		ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+		ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+		ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+		ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+		ppe_sportv6 = entry->ipv6_5t_route.sport;
+		ppe_dportv6 = entry->ipv6_5t_route.dport;
+		if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+		    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+		    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+		    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+		    (sport == ppe_sportv6) && (dport == ppe_dportv6)) {
+			if (debug_level >= 1)
+				pr_info("mib: ipv6 entry found entry idx = %d\n", hash_index);
+		} else {
+			if (debug_level >= 1)
+				pr_info("mib: ipv6 entry not found\n");
+			return -1;
+		}
+	}
+
+	return hash_index;
+}
+#endif
+
+int hash_ipv4(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	u32 t_hvt_31;
+	u32 t_hvt_63;
+	u32 t_hvt_95;
+	u32 t_hvt_sd;
+
+	u32 t_hvt_sd_23;
+	u32 t_hvt_sd_31_24;
+	u32 t_hash_32;
+	u32 t_hashs_16;
+	u32 t_ha16k;
+	u32 hash_index;
+	u32 ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport;
+
+	saddr = key->ipv4_hnapt.sip;
+	daddr = key->ipv4_hnapt.dip;
+	sport = key->ipv4_hnapt.sport;
+	dport = key->ipv4_hnapt.dport;
+
+	t_hvt_31 = sport << 16 | dport;
+	t_hvt_63 = daddr;
+	t_hvt_95 = saddr;
+
+	/* pr_info("saddr = %x, daddr=%x, sport=%d, dport=%d\n", saddr, daddr, sport, dport); */
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr = entry->ipv4_hnapt.sip;
+	ppe_daddr = entry->ipv4_hnapt.dip;
+	ppe_sport = entry->ipv4_hnapt.sport;
+	ppe_dport = entry->ipv4_hnapt.dport;
+
+	if (del != 1) {
+		if (entry->ipv4_hnapt.bfib1.state == BIND) {
+			pr_info("Hash collision, hash index +1\n");
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+		}
+		if (entry->ipv4_hnapt.bfib1.state == BIND) {
+			pr_info("Hash collision can not bind\n");
+			return -1;
+		}
+	} else if (del == 1) {
+		if ((saddr != ppe_saddr) || (daddr != ppe_daddr) ||
+		    (sport != ppe_sport) || (dport != ppe_dport)) {
+			/* first way missed; check the second way of the bucket */
+			hash_index = hash_index + 1;
+			entry = &ppe_foe_base[hash_index];
+			ppe_saddr = entry->ipv4_hnapt.sip;
+			ppe_daddr = entry->ipv4_hnapt.dip;
+			ppe_sport = entry->ipv4_hnapt.sport;
+			ppe_dport = entry->ipv4_hnapt.dport;
+			if ((saddr != ppe_saddr) || (daddr != ppe_daddr) ||
+			    (sport != ppe_sport) || (dport != ppe_dport)) {
+				if (fe_feature & SEMI_AUTO_MODE)
+					pr_info("hash collision, hwnat entry not found\n");
+				else if (fe_feature & MANUAL_MODE)
+					pr_info("Entry delete: entry not found\n");
+				return -1;
+			}
+		}
+	}
+	return hash_index;
+}
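+
+/* Worked example of the fold above (hash mode 1, 16K table assumed):
+ * sip 192.168.1.1 (0xc0a80101), dip 8.8.8.8 (0x08080808),
+ * sport 1024, dport 80:
+ *
+ *	t_hvt_31 = 0x04000050, t_hvt_63 = 0x08080808, t_hvt_95 = 0xc0a80101
+ *	t_hvt_sd = (t_hvt_31 & t_hvt_63) | (~t_hvt_31 & t_hvt_95) = 0xc0a80101
+ *	t_hash_32 = 0x04000050 ^ 0x08080808 ^ 0xc0a80101 ^ 0xa80101c0
+ *	          = 0x64a10899
+ *	t_hashs_16 = 0x64a1 ^ 0x10899 = 0x16c38   (note the 20-bit low mask)
+ *	t_ha16k = 0x16c38 & 0x1fff = 0x0c38, hash_index = 0x1870 (= 6256)
+ *
+ * so this flow lands in the two-way set {6256, 6257}.
+ */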
+
+int hash_mib_ipv4(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	u32 t_hvt_31;
+	u32 t_hvt_63;
+	u32 t_hvt_95;
+	u32 t_hvt_sd;
+
+	u32 t_hvt_sd_23;
+	u32 t_hvt_sd_31_24;
+	u32 t_hash_32;
+	u32 t_hashs_16;
+	u32 t_ha16k;
+	u32 hash_index;
+	u32 ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport;
+
+	saddr = key->ipv4_hnapt.sip;
+	daddr = key->ipv4_hnapt.dip;
+	sport = key->ipv4_hnapt.sport;
+	dport = key->ipv4_hnapt.dport;
+
+	t_hvt_31 = sport << 16 | dport;
+	t_hvt_63 = daddr;
+	t_hvt_95 = saddr;
+
+	/* pr_info("saddr = %x, daddr=%x, sport=%d, dport=%d\n", saddr, daddr, sport, dport); */
+	if (DFL_FOE_HASH_MODE == 1)	/* hash mode 1 */
+		t_hvt_sd = (t_hvt_31 & t_hvt_63) | ((~t_hvt_31) & t_hvt_95);
+	else                            /* hash mode 2 */
+		t_hvt_sd = t_hvt_63 ^ (t_hvt_31 & (~t_hvt_95));
+
+	t_hvt_sd_23 = t_hvt_sd & 0xffffff;
+	t_hvt_sd_31_24 = t_hvt_sd & 0xff000000;
+	t_hash_32 = t_hvt_31 ^ t_hvt_63 ^ t_hvt_95 ^ ((t_hvt_sd_23 << 8) | (t_hvt_sd_31_24 >> 24));
+	t_hashs_16 = ((t_hash_32 & 0xffff0000) >> 16) ^ (t_hash_32 & 0xfffff);
+
+	if (FOE_4TB_SIZ == 16384)
+		t_ha16k = t_hashs_16 & 0x1fff;  /* FOE_16k */
+	else if (FOE_4TB_SIZ == 8192)
+		t_ha16k = t_hashs_16 & 0xfff;  /* FOE_8k */
+	else if (FOE_4TB_SIZ == 4096)
+		t_ha16k = t_hashs_16 & 0x7ff;  /* FOE_4k */
+	else if (FOE_4TB_SIZ == 2048)
+		t_ha16k = t_hashs_16 & 0x3ff;  /* FOE_2k */
+	else
+		t_ha16k = t_hashs_16 & 0x1ff;  /* FOE_1k */
+	hash_index = (u32)t_ha16k * 2;
+
+	entry = &ppe_foe_base[hash_index];
+	ppe_saddr = entry->ipv4_hnapt.sip;
+	ppe_daddr = entry->ipv4_hnapt.dip;
+	ppe_sport = entry->ipv4_hnapt.sport;
+	ppe_dport = entry->ipv4_hnapt.dport;
+
+	if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+	    (sport == ppe_sport) && (dport == ppe_dport)) {
+		if (debug_level >= 1)
+			pr_info("mib: ipv4 entry found: %d\n", hash_index);
+	} else {
+		hash_index = hash_index + 1;
+		entry = &ppe_foe_base[hash_index];
+		ppe_saddr = entry->ipv4_hnapt.sip;
+		ppe_daddr = entry->ipv4_hnapt.dip;
+		ppe_sport = entry->ipv4_hnapt.sport;
+		ppe_dport = entry->ipv4_hnapt.dport;
+		if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+		    (sport == ppe_sport) && (dport == ppe_dport)) {
+			if (debug_level >= 1)
+				pr_info("mib: ipv4 entry found: %d\n", hash_index);
+		} else {
+			if (debug_level >= 1)
+				pr_info("mib: ipv4 entry not found\n");
+			return -1;
+		}
+	}
+
+	return hash_index;
+}
+
+int get_ppe_entry_idx(struct foe_pri_key *key, struct foe_entry *entry, int del)
+{
+	if ((key->pkt_type) == IPV4_NAPT)
+		return hash_ipv4(key, entry, del);
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+	else if ((key->pkt_type) == IPV6_ROUTING)
+		return hash_ipv6(key, entry, del);
+#endif
+	else
+		return -1;
+}
+
+int get_mib_entry_idx(struct foe_pri_key *key, struct foe_entry *entry)
+{
+	if ((key->pkt_type) == IPV4_NAPT)
+		return hash_mib_ipv4(key, entry);
+#if defined(CONFIG_RA_HW_NAT_IPV6)
+	else if ((key->pkt_type) == IPV6_ROUTING)
+		return hash_mib_ipv6(key, entry);
+#endif
+	else
+		return -1;
+}
+EXPORT_SYMBOL(get_mib_entry_idx);
+
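+/*
+ * The FOE entry stores MAC addresses byte-swapped: the *_hi fields hold
+ * the first four octets in reverse order and the *_lo fields the last
+ * two, as the assignments below show.
+ */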
+void foe_set_mac_hi_info(u8 *dst, uint8_t *src)
+{
+	dst[3] = src[0];
+	dst[2] = src[1];
+	dst[1] = src[2];
+	dst[0] = src[3];
+}
+
+void foe_set_mac_lo_info(u8 *dst, uint8_t *src)
+{
+	dst[1] = src[4];
+	dst[0] = src[5];
+}
+
+static int is_request_done(void)
+{
+	int count = 1000;
+
+	/* wait up to 1 s for the pending request to finish */
+	do {
+		if (((reg_read(CAH_CTRL) >> 8) & 0x1) == 0)
+			return 1;
+		usleep_range(1000, 1100);
+	} while (--count);
+
+	return 0;
+}
+
+#define MAX_CACHE_LINE_NUM		32
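+/*
+ * Dump the PPE cache through its indirect access window. The sequence
+ * below (bit positions taken from the reg_modify_bits() calls, not from
+ * documentation) is: disable the cache, select a line in CAH_LINE_RW,
+ * issue a read command (2) via CAH_CTRL[13:12], trigger CAH_CTRL[8] and
+ * poll until done, then issue a write command (3) with CAH_WDATA = 0 to
+ * clear the line; finally flush and re-enable the cache.
+ */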
+int foe_dump_cache_entry(void)
+{
+	int line = 0;
+	int state = 0;
+	int tag = 0;
+	int cah_en = 0;
+	int i = 0;
+
+	cah_en = reg_read(CAH_CTRL) & 0x1;
+
+	if (!cah_en) {
+		pr_debug("Cache is not enabled\n");
+		return 0;
+	}
+
+	/* cache disable */
+	reg_modify_bits(CAH_CTRL, 0, 0, 1);
+
+	pr_debug(" No--|---State---|----Tag-----\n");
+	pr_debug("-----+-----------+------------\n");
+	for (line = 0; line < MAX_CACHE_LINE_NUM; line++) {
+		/* set line number */
+		reg_modify_bits(CAH_LINE_RW, line, 0, 15);
+
+		/* OFFSET_RW = 0x1F (Get Entry Number) */
+		reg_modify_bits(CAH_LINE_RW, 0x1F, 16, 8);
+
+		/* software access cache command = read */
+		reg_modify_bits(CAH_CTRL, 2, 12, 2);
+
+		/* trigger software access cache request */
+		reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+		if (is_request_done()) {
+			tag = (reg_read(CAH_RDATA) & 0xFFFF);
+			state = ((reg_read(CAH_RDATA) >> 16) & 0x3);
+			pr_debug("%04d | %s   | %05d\n", line,
+				 (state == 3) ? " Lock  " :
+				 (state == 2) ? " Dirty " :
+				 (state == 1) ? " Valid " : "Invalid", tag);
+		} else {
+			pr_debug("%s is timeout (%d)\n", __func__, line);
+		}
+
+		/* software access cache command = write */
+		reg_modify_bits(CAH_CTRL, 3, 12, 2);
+
+		reg_write(CAH_WDATA, 0);
+
+		/* trigger software access cache request */
+		reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+		if (!is_request_done())
+			pr_debug("%s is timeout (%d)\n", __func__, line);
+		/* dump first 16B for each foe entry */
+		pr_debug("==========<Flow Table Entry=%d >===============\n", tag);
+		for (i = 0; i < 16; i++) {
+			reg_modify_bits(CAH_LINE_RW, i, 16, 8);
+
+			/* software access cache command = read */
+			reg_modify_bits(CAH_CTRL, 2, 12, 2);
+
+			/* trigger software access cache request */
+			reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+			if (is_request_done())
+				pr_debug("%02d  %08X\n", i, reg_read(CAH_RDATA));
+			else
+				pr_debug("%s is timeout (%d)\n", __func__, line);
+
+			/* software access cache command = write */
+			reg_modify_bits(CAH_CTRL, 3, 12, 2);
+
+			reg_write(CAH_WDATA, 0);
+
+			/* trigger software access cache request */
+			reg_modify_bits(CAH_CTRL, 1, 8, 1);
+
+			if (!is_request_done())
+				pr_debug("%s is timeout (%d)\n", __func__, line);
+		}
+		pr_debug("=========================================\n");
+	}
+
+	/* clear cache table before enabling cache */
+	reg_modify_bits(CAH_CTRL, 1, 9, 1);
+	reg_modify_bits(CAH_CTRL, 0, 9, 1);
+
+	/* cache enable */
+	reg_modify_bits(CAH_CTRL, 1, 0, 1);
+
+	return 1;
+}
+
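+/*
+ * struct foe_entry is a union of per-packet-type layouts (ipv4_hnapt,
+ * ipv6_5t_route, ...), as the accesses below suggest; fields such as
+ * bfib1 alias across all views, so the dump picks the view matching the
+ * entry's packet type.
+ */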
+void foe_dump_entry(uint32_t index)
+{
+	struct foe_entry *entry = &ppe_foe_base[index];
+	struct ps_entry *ps_entry = &ppe_ps_base[index];
+
+	u32 *p = (uint32_t *)entry;
+	u32 i = 0;
+	u32 print_cnt;
+
+	NAT_PRINT("==========<Flow Table Entry=%d (%p)>===============\n", index, entry);
+	if (debug_level >= 2) {
+		if (fe_feature & HNAT_IPV6)
+			print_cnt = 20;
+		else
+			print_cnt = 16;
+
+		for (i = 0; i < print_cnt; i++)
+			NAT_PRINT("%02d: %08X\n", i, *(p + i));
+	}
+	NAT_PRINT("-----------------<Flow Info>------------------\n");
+	NAT_PRINT("Information Block 1: %08X\n", entry->ipv4_hnapt.info_blk1);
+
+	if (IS_IPV4_HNAPT(entry)) {
+		if (hnat_chip_name & MT7622_HWNAT) {
+			NAT_PRINT("Information Block 2=%x (FP=%d FQOS=%d QID=%d)",
+				  entry->ipv4_hnapt.info_blk2,
+				  entry->ipv4_hnapt.info_blk2 >> 5 & 0x7,
+				  entry->ipv4_hnapt.info_blk2 >> 4 & 0x1,
+				  (entry->ipv4_hnapt.iblk2.qid) +
+				  ((entry->ipv4_hnapt.iblk2.qid1 & 0x03) << 4));
+		} else {
+			NAT_PRINT("Information Block 2=%x (FP=%d FQOS=%d QID=%d)",
+				  entry->ipv4_hnapt.info_blk2,
+				  entry->ipv4_hnapt.info_blk2 >> 5 & 0x7,
+				  entry->ipv4_hnapt.info_blk2 >> 4 & 0x1,
+				  entry->ipv4_hnapt.iblk2.qid);
+		}
+		NAT_PRINT("Create IPv4 HNAPT entry\n");
+		NAT_PRINT
+		    ("IPv4 Org IP/Port: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_hnapt.sip), IP_FORMAT2(entry->ipv4_hnapt.sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.sip), IP_FORMAT0(entry->ipv4_hnapt.sip),
+		     entry->ipv4_hnapt.sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.dip), IP_FORMAT2(entry->ipv4_hnapt.dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip),
+		     entry->ipv4_hnapt.dport);
+		NAT_PRINT
+		    ("IPv4 New IP/Port: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_hnapt.new_sip), IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_sip), IP_FORMAT0(entry->ipv4_hnapt.new_sip),
+		     entry->ipv4_hnapt.new_sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.new_dip), IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_dip), IP_FORMAT0(entry->ipv4_hnapt.new_dip),
+		     entry->ipv4_hnapt.new_dport);
+	} else if (IS_IPV4_HNAT(entry)) {
+		NAT_PRINT("Information Block 2: %08X\n", entry->ipv4_hnapt.info_blk2);
+		NAT_PRINT("Create IPv4 HNAT entry\n");
+		NAT_PRINT("IPv4 Org IP: %u.%u.%u.%u->%u.%u.%u.%u\n",
+			  IP_FORMAT3(entry->ipv4_hnapt.sip), IP_FORMAT2(entry->ipv4_hnapt.sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.sip), IP_FORMAT0(entry->ipv4_hnapt.sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.dip), IP_FORMAT2(entry->ipv4_hnapt.dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip));
+		NAT_PRINT("IPv4 New IP: %u.%u.%u.%u->%u.%u.%u.%u\n",
+			  IP_FORMAT3(entry->ipv4_hnapt.new_sip), IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_sip), IP_FORMAT0(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.new_dip), IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_dip), IP_FORMAT0(entry->ipv4_hnapt.new_dip));
+	}
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_1T_ROUTE(entry)) {
+			NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_1t_route.info_blk2);
+			NAT_PRINT("Create IPv6 Route entry\n");
+			NAT_PRINT("Destination IPv6: %08X:%08X:%08X:%08X",
+				  entry->ipv6_1t_route.ipv6_dip3, entry->ipv6_1t_route.ipv6_dip2,
+				  entry->ipv6_1t_route.ipv6_dip1, entry->ipv6_1t_route.ipv6_dip0);
+		} else if (IS_IPV4_DSLITE(entry)) {
+			NAT_PRINT("Information Block 2: %08X\n", entry->ipv4_dslite.info_blk2);
+			NAT_PRINT("Create IPv4 Ds-Lite entry\n");
+			NAT_PRINT
+			    ("IPv4 Ds-Lite: %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+			     IP_FORMAT3(entry->ipv4_dslite.sip), IP_FORMAT2(entry->ipv4_dslite.sip),
+			     IP_FORMAT1(entry->ipv4_dslite.sip), IP_FORMAT0(entry->ipv4_dslite.sip),
+			     entry->ipv4_dslite.sport,
+			     IP_FORMAT3(entry->ipv4_dslite.dip), IP_FORMAT2(entry->ipv4_dslite.dip),
+			     IP_FORMAT1(entry->ipv4_dslite.dip), IP_FORMAT0(entry->ipv4_dslite.dip),
+			     entry->ipv4_dslite.dport);
+			NAT_PRINT("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+				  entry->ipv4_dslite.tunnel_sipv6_0, entry->ipv4_dslite.tunnel_sipv6_1,
+				  entry->ipv4_dslite.tunnel_sipv6_2, entry->ipv4_dslite.tunnel_sipv6_3,
+				  entry->ipv4_dslite.tunnel_dipv6_0, entry->ipv4_dslite.tunnel_dipv6_1,
+				  entry->ipv4_dslite.tunnel_dipv6_2, entry->ipv4_dslite.tunnel_dipv6_3);
+		} else if (IS_IPV6_3T_ROUTE(entry)) {
+			NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_3t_route.info_blk2);
+			NAT_PRINT("Create IPv6 3-Tuple entry\n");
+			NAT_PRINT
+			    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n",
+			     entry->ipv6_3t_route.ipv6_sip0, entry->ipv6_3t_route.ipv6_sip1,
+			     entry->ipv6_3t_route.ipv6_sip2, entry->ipv6_3t_route.ipv6_sip3,
+			     entry->ipv6_3t_route.ipv6_dip0, entry->ipv6_3t_route.ipv6_dip1,
+			     entry->ipv6_3t_route.ipv6_dip2, entry->ipv6_3t_route.ipv6_dip3,
+			     entry->ipv6_3t_route.prot);
+		} else if (IS_IPV6_5T_ROUTE(entry)) {
+			NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_5t_route.info_blk2);
+			NAT_PRINT("Create IPv6 5-Tuple entry\n");
+			if (IS_IPV6_FLAB_EBL()) {
+				NAT_PRINT
+				    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Flow Label=%08X)\n",
+				     entry->ipv6_5t_route.ipv6_sip0, entry->ipv6_5t_route.ipv6_sip1,
+				     entry->ipv6_5t_route.ipv6_sip2, entry->ipv6_5t_route.ipv6_sip3,
+				     entry->ipv6_5t_route.ipv6_dip0, entry->ipv6_5t_route.ipv6_dip1,
+				     entry->ipv6_5t_route.ipv6_dip2, entry->ipv6_5t_route.ipv6_dip3,
+				     ((entry->ipv6_5t_route.sport << 16) |
+				      (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+			} else {
+				NAT_PRINT
+				    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+				     entry->ipv6_5t_route.ipv6_sip0, entry->ipv6_5t_route.ipv6_sip1,
+				     entry->ipv6_5t_route.ipv6_sip2, entry->ipv6_5t_route.ipv6_sip3,
+				     entry->ipv6_5t_route.sport, entry->ipv6_5t_route.ipv6_dip0,
+				     entry->ipv6_5t_route.ipv6_dip1, entry->ipv6_5t_route.ipv6_dip2,
+				     entry->ipv6_5t_route.ipv6_dip3, entry->ipv6_5t_route.dport);
+			}
+		} else if (IS_IPV6_6RD(entry)) {
+			NAT_PRINT("Information Block 2: %08X\n", entry->ipv6_6rd.info_blk2);
+			NAT_PRINT("Create IPv6 6RD entry\n");
+			if (IS_IPV6_FLAB_EBL()) {
+				NAT_PRINT
+				    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Flow Label=%08X)\n",
+				     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+				     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+				     entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+				     entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3,
+				     ((entry->ipv6_5t_route.sport << 16) |
+				      (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+			} else {
+				NAT_PRINT
+				    ("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+				     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+				     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+				     entry->ipv6_6rd.sport, entry->ipv6_6rd.ipv6_dip0,
+				     entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+				     entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+			}
+		}
+	}
+	if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) {
+		NAT_PRINT("DMAC=%02X:%02X:%02X:%02X:%02X:%02X SMAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
+			  entry->ipv4_hnapt.dmac_hi[3], entry->ipv4_hnapt.dmac_hi[2],
+			  entry->ipv4_hnapt.dmac_hi[1], entry->ipv4_hnapt.dmac_hi[0],
+			  entry->ipv4_hnapt.dmac_lo[1], entry->ipv4_hnapt.dmac_lo[0],
+			  entry->ipv4_hnapt.smac_hi[3], entry->ipv4_hnapt.smac_hi[2],
+			  entry->ipv4_hnapt.smac_hi[1], entry->ipv4_hnapt.smac_hi[0],
+			  entry->ipv4_hnapt.smac_lo[1], entry->ipv4_hnapt.smac_lo[0]);
+		NAT_PRINT("State = %s, ",
+			  entry->bfib1.state ==
+			  0 ? "Invalid" : entry->bfib1.state ==
+			  1 ? "Unbind" : entry->bfib1.state ==
+			  2 ? "BIND" : entry->bfib1.state ==
+			  3 ? "FIN" : "Unknown");
+		NAT_PRINT("Vlan_Layer = %u, ",
+			  entry->bfib1.vlan_layer);
+		NAT_PRINT("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+			  entry->ipv4_hnapt.etype, entry->ipv4_hnapt.vlan1,
+			  entry->ipv4_hnapt.vlan2_winfo);
+		NAT_PRINT("mib = %d, multicast = %d, pppoe = %d, proto = %s\n",
+			  entry->ipv4_hnapt.iblk2.mibf,
+			  entry->ipv4_hnapt.iblk2.mcast,
+			  entry->ipv4_hnapt.bfib1.psn,
+			  entry->ipv4_hnapt.bfib1.udp == 0 ? "TCP" :
+			  entry->ipv4_hnapt.bfib1.udp == 1 ? "UDP" : "Unknown");
+		NAT_PRINT("=========================================\n\n");
+	} else {
+		if (fe_feature & HNAT_IPV6) {
+			NAT_PRINT("DMAC=%02X:%02X:%02X:%02X:%02X:%02X SMAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
+				  entry->ipv6_5t_route.dmac_hi[3], entry->ipv6_5t_route.dmac_hi[2],
+				  entry->ipv6_5t_route.dmac_hi[1], entry->ipv6_5t_route.dmac_hi[0],
+				  entry->ipv6_5t_route.dmac_lo[1], entry->ipv6_5t_route.dmac_lo[0],
+				  entry->ipv6_5t_route.smac_hi[3], entry->ipv6_5t_route.smac_hi[2],
+				  entry->ipv6_5t_route.smac_hi[1], entry->ipv6_5t_route.smac_hi[0],
+				  entry->ipv6_5t_route.smac_lo[1], entry->ipv6_5t_route.smac_lo[0]);
+			NAT_PRINT("State = %s, STC = %s, ", entry->bfib1.state ==
+				  0 ? "Invalid" : entry->bfib1.state ==
+				  1 ? "Unbind" : entry->bfib1.state ==
+				  2 ? "BIND" : entry->bfib1.state ==
+				  3 ? "FIN" : "Unknown", entry->bfib1.sta ==
+				  0 ? "Dynamic" : entry->bfib1.sta ==
+				  1 ? "static" : "Unknown");
+
+			NAT_PRINT("Vlan_Layer = %u, ",
+				  entry->bfib1.vlan_layer);
+			NAT_PRINT("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n",
+				  entry->ipv6_5t_route.etype,
+				  entry->ipv6_5t_route.vlan1,
+				  entry->ipv6_5t_route.vlan2_winfo);
+			NAT_PRINT("mib = %d, multicast = %d, pppoe = %d, proto = %s",
+				  entry->ipv6_5t_route.iblk2.mibf,
+				  entry->ipv6_5t_route.iblk2.mcast,
+				  entry->ipv6_5t_route.bfib1.psn,
+				  entry->ipv6_5t_route.bfib1.udp ==
+				  0 ? "TCP" : entry->ipv6_5t_route.bfib1.udp ==
+				  1 ? "UDP" : "Unknown");
+			NAT_PRINT(" Remove tunnel = %u\n", entry->bfib1.rmt);
+			NAT_PRINT("=========================================\n\n");
+		}
+	}
+
+	if (fe_feature & PACKET_SAMPLING) {
+		p = (uint32_t *)ps_entry;
+
+		NAT_PRINT("==========<PS Table Entry=%d (%p)>===============\n", index, ps_entry);
+		for (i = 0; i < 4; i++)
+			pr_debug("%02d: %08X\n", i, *(p + i));
+	}
+}
+
+int foe_get_all_entries(struct hwnat_args *opt1)
+{
+	struct foe_entry *entry;
+	int hash_index = 0;
+	int count = 0;		/* valid entry count */
+
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		entry = &ppe_foe_base[hash_index];
+		if (entry->bfib1.state == opt1->entry_state) {
+			opt1->entries[count].hash_index = hash_index;
+			opt1->entries[count].pkt_type = entry->ipv4_hnapt.bfib1.pkt_type;
+
+			if (IS_IPV4_HNAT(entry)) {
+				opt1->entries[count].ing_sipv4 = entry->ipv4_hnapt.sip;
+				opt1->entries[count].ing_dipv4 = entry->ipv4_hnapt.dip;
+				opt1->entries[count].eg_sipv4 = entry->ipv4_hnapt.new_sip;
+				opt1->entries[count].eg_dipv4 = entry->ipv4_hnapt.new_dip;
+				count++;
+			} else if (IS_IPV4_HNAPT(entry)) {
+				opt1->entries[count].ing_sipv4 = entry->ipv4_hnapt.sip;
+				opt1->entries[count].ing_dipv4 = entry->ipv4_hnapt.dip;
+				opt1->entries[count].eg_sipv4 = entry->ipv4_hnapt.new_sip;
+				opt1->entries[count].eg_dipv4 = entry->ipv4_hnapt.new_dip;
+				opt1->entries[count].ing_sp = entry->ipv4_hnapt.sport;
+				opt1->entries[count].ing_dp = entry->ipv4_hnapt.dport;
+				opt1->entries[count].eg_sp = entry->ipv4_hnapt.new_sport;
+				opt1->entries[count].eg_dp = entry->ipv4_hnapt.new_dport;
+				count++;
+			}
+			if (fe_feature & HNAT_IPV6) {
+				if (IS_IPV6_1T_ROUTE(entry)) {
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_1t_route.ipv6_dip3;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_1t_route.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_1t_route.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_1t_route.ipv6_dip0;
+					count++;
+				} else if (IS_IPV4_DSLITE(entry)) {
+					opt1->entries[count].ing_sipv4 = entry->ipv4_dslite.sip;
+					opt1->entries[count].ing_dipv4 = entry->ipv4_dslite.dip;
+					opt1->entries[count].ing_sp = entry->ipv4_dslite.sport;
+					opt1->entries[count].ing_dp = entry->ipv4_dslite.dport;
+					opt1->entries[count].eg_sipv6_0 = entry->ipv4_dslite.tunnel_sipv6_0;
+					opt1->entries[count].eg_sipv6_1 = entry->ipv4_dslite.tunnel_sipv6_1;
+					opt1->entries[count].eg_sipv6_2 = entry->ipv4_dslite.tunnel_sipv6_2;
+					opt1->entries[count].eg_sipv6_3 = entry->ipv4_dslite.tunnel_sipv6_3;
+					opt1->entries[count].eg_dipv6_0 = entry->ipv4_dslite.tunnel_dipv6_0;
+					opt1->entries[count].eg_dipv6_1 = entry->ipv4_dslite.tunnel_dipv6_1;
+					opt1->entries[count].eg_dipv6_2 = entry->ipv4_dslite.tunnel_dipv6_2;
+					opt1->entries[count].eg_dipv6_3 = entry->ipv4_dslite.tunnel_dipv6_3;
+					count++;
+				} else if (IS_IPV6_3T_ROUTE(entry)) {
+					opt1->entries[count].ing_sipv6_0 = entry->ipv6_3t_route.ipv6_sip0;
+					opt1->entries[count].ing_sipv6_1 = entry->ipv6_3t_route.ipv6_sip1;
+					opt1->entries[count].ing_sipv6_2 = entry->ipv6_3t_route.ipv6_sip2;
+					opt1->entries[count].ing_sipv6_3 = entry->ipv6_3t_route.ipv6_sip3;
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_3t_route.ipv6_dip0;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_3t_route.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_3t_route.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_3t_route.ipv6_dip3;
+					opt1->entries[count].prot = entry->ipv6_3t_route.prot;
+					count++;
+				} else if (IS_IPV6_5T_ROUTE(entry)) {
+					opt1->entries[count].ing_sipv6_0 = entry->ipv6_5t_route.ipv6_sip0;
+					opt1->entries[count].ing_sipv6_1 = entry->ipv6_5t_route.ipv6_sip1;
+					opt1->entries[count].ing_sipv6_2 = entry->ipv6_5t_route.ipv6_sip2;
+					opt1->entries[count].ing_sipv6_3 = entry->ipv6_5t_route.ipv6_sip3;
+					opt1->entries[count].ing_sp = entry->ipv6_5t_route.sport;
+					opt1->entries[count].ing_dp = entry->ipv6_5t_route.dport;
+
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_5t_route.ipv6_dip0;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_5t_route.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_5t_route.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_5t_route.ipv6_dip3;
+					opt1->entries[count].ipv6_flowlabel = IS_IPV6_FLAB_EBL();
+					count++;
+				} else if (IS_IPV6_6RD(entry)) {
+					opt1->entries[count].ing_sipv6_0 = entry->ipv6_6rd.ipv6_sip0;
+					opt1->entries[count].ing_sipv6_1 = entry->ipv6_6rd.ipv6_sip1;
+					opt1->entries[count].ing_sipv6_2 = entry->ipv6_6rd.ipv6_sip2;
+					opt1->entries[count].ing_sipv6_3 = entry->ipv6_6rd.ipv6_sip3;
+
+					opt1->entries[count].ing_dipv6_0 = entry->ipv6_6rd.ipv6_dip0;
+					opt1->entries[count].ing_dipv6_1 = entry->ipv6_6rd.ipv6_dip1;
+					opt1->entries[count].ing_dipv6_2 = entry->ipv6_6rd.ipv6_dip2;
+					opt1->entries[count].ing_dipv6_3 = entry->ipv6_6rd.ipv6_dip3;
+					opt1->entries[count].ing_sp = entry->ipv6_6rd.sport;
+					opt1->entries[count].ing_dp = entry->ipv6_6rd.dport;
+					opt1->entries[count].ipv6_flowlabel = IS_IPV6_FLAB_EBL();
+
+					opt1->entries[count].eg_sipv4 = entry->ipv6_6rd.tunnel_sipv4;
+					opt1->entries[count].eg_dipv4 = entry->ipv6_6rd.tunnel_dipv4;
+					count++;
+				}
+			}
+		}
+	}
+	opt1->num_of_entries = count;
+
+	if (opt1->num_of_entries > 0)
+		return HWNAT_SUCCESS;
+	else
+		return HWNAT_ENTRY_NOT_FOUND;
+}
+
+int foe_bind_entry(struct hwnat_args *opt1)
+{
+	struct foe_entry *entry;
+
+	entry = &ppe_foe_base[opt1->entry_num];
+
+	/* restore right information block1 */
+	entry->bfib1.time_stamp = reg_read(FOE_TS) & 0xFFFF;
+	entry->bfib1.state = BIND;
+
+	return HWNAT_SUCCESS;
+}
+
+int foe_un_bind_entry(struct hwnat_args *opt)
+{
+	struct foe_entry *entry;
+
+	entry = &ppe_foe_base[opt->entry_num];
+
+	entry->ipv4_hnapt.udib1.state = INVALID;
+	entry->ipv4_hnapt.udib1.time_stamp = reg_read(FOE_TS) & 0xFF;
+
+	ppe_set_cache_ebl();	/*clear HWNAT cache */
+
+	return HWNAT_SUCCESS;
+}
+
+int _foe_drop_entry(unsigned int entry_num)
+{
+	struct foe_entry *entry;
+
+	entry = &ppe_foe_base[entry_num];
+
+	entry->ipv4_hnapt.iblk2.dp = 7;
+
+	ppe_set_cache_ebl();	/*clear HWNAT cache */
+
+	return HWNAT_SUCCESS;
+}
+EXPORT_SYMBOL(_foe_drop_entry);
+
+int foe_drop_entry(struct hwnat_args *opt)
+{
+	return _foe_drop_entry(opt->entry_num);
+}
+
+int foe_del_entry_by_num(uint32_t entry_num)
+{
+	struct foe_entry *entry;
+
+	entry = &ppe_foe_base[entry_num];
+	memset(entry, 0, sizeof(struct foe_entry));
+	ppe_set_cache_ebl();	/*clear HWNAT cache */
+
+	return HWNAT_SUCCESS;
+}
+
+void foe_tbl_clean(void)
+{
+	u32 foe_tbl_size;
+
+	foe_tbl_size = FOE_4TB_SIZ * sizeof(struct foe_entry);
+	memset(ppe_foe_base, 0, foe_tbl_size);
+	ppe_set_cache_ebl();	/*clear HWNAT cache */
+}
+EXPORT_SYMBOL(foe_tbl_clean);
+
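+/*
+ * Entry construction is split into stages: hw_nat_l2_info() fills the
+ * MAC/VLAN/PPPoE fields, hw_nat_l3_info() the IP addresses,
+ * hw_nat_l4_info() the ports, and hw_nat_ib1_info()/hw_nat_ib2_info()
+ * the two information blocks (state/timestamp and forwarding port/QoS,
+ * respectively).
+ */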
+void hw_nat_l2_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		foe_set_mac_hi_info(entry->ipv4_hnapt.dmac_hi, opt->dmac);
+		foe_set_mac_lo_info(entry->ipv4_hnapt.dmac_lo, opt->dmac);
+		foe_set_mac_hi_info(entry->ipv4_hnapt.smac_hi, opt->smac);
+		foe_set_mac_lo_info(entry->ipv4_hnapt.smac_lo, opt->smac);
+		entry->ipv4_hnapt.vlan1 = opt->vlan1;
+		/* warp hwnat does not support vlan2 */
+		/* mt7622 wifi hwnat does not support vlan2 */
+		//entry->ipv4_hnapt.vlan2_winfo = opt->vlan2;
+		entry->ipv4_hnapt.etype = opt->etype;
+		entry->ipv4_hnapt.pppoe_id = opt->pppoe_id;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			foe_set_mac_hi_info(entry->ipv6_5t_route.dmac_hi, opt->dmac);
+			foe_set_mac_lo_info(entry->ipv6_5t_route.dmac_lo, opt->dmac);
+			foe_set_mac_hi_info(entry->ipv6_5t_route.smac_hi, opt->smac);
+			foe_set_mac_lo_info(entry->ipv6_5t_route.smac_lo, opt->smac);
+			entry->ipv6_5t_route.vlan1 = opt->vlan1;
+			/* mt7622 wifi hwnat does not support vlan2 */
+			//entry->ipv6_5t_route.vlan2_winfo = opt->vlan2;
+			entry->ipv6_5t_route.etype = opt->etype;
+			entry->ipv6_5t_route.pppoe_id = opt->pppoe_id;
+		}
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		foe_set_mac_hi_info(entry->ipv4_dslite.dmac_hi, opt->dmac);
+		foe_set_mac_lo_info(entry->ipv4_dslite.dmac_lo, opt->dmac);
+		foe_set_mac_hi_info(entry->ipv4_dslite.smac_hi, opt->smac);
+		foe_set_mac_lo_info(entry->ipv4_dslite.smac_lo, opt->smac);
+		entry->ipv4_dslite.vlan1 = opt->vlan1;
+		/* mt7622 wifi hwnat does not support vlan2 */
+		//entry->ipv4_dslite.vlan2_winfo = opt->vlan2;
+		entry->ipv4_dslite.pppoe_id = opt->pppoe_id;
+		entry->ipv4_dslite.etype = opt->etype;
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		foe_set_mac_hi_info(entry->ipv6_6rd.dmac_hi, opt->dmac);
+		foe_set_mac_lo_info(entry->ipv6_6rd.dmac_lo, opt->dmac);
+		foe_set_mac_hi_info(entry->ipv6_6rd.smac_hi, opt->smac);
+		foe_set_mac_lo_info(entry->ipv6_6rd.smac_lo, opt->smac);
+		entry->ipv6_6rd.vlan1 = opt->vlan1;
+		/* mt7622 wifi hwnat does not support vlan2 */
+		entry->ipv6_6rd.vlan2_winfo = opt->vlan2;
+		entry->ipv6_6rd.pppoe_id = opt->pppoe_id;
+		entry->ipv6_6rd.etype = opt->etype;
+	}
+}
+
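+/* fixed flow-label test vector written into DS-Lite entries below; presumably for DVT only */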
+struct test_hdr {
+	u8	flow_lbl[3];
+};
+
+void hw_nat_l3_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	struct test_hdr test;
+
+	test.flow_lbl[0] = 0x56;
+	test.flow_lbl[1] = 0x12;
+	test.flow_lbl[2] = 0xab;
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.sip = opt->ing_sipv4;
+		entry->ipv4_hnapt.dip = opt->ing_dipv4;
+		entry->ipv4_hnapt.new_sip = opt->eg_sipv4;
+		entry->ipv4_hnapt.new_dip = opt->eg_dipv4;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.ipv6_sip0 = opt->ing_sipv6_0;
+			entry->ipv6_5t_route.ipv6_sip1 = opt->ing_sipv6_1;
+			entry->ipv6_5t_route.ipv6_sip2 = opt->ing_sipv6_2;
+			entry->ipv6_5t_route.ipv6_sip3 = opt->ing_sipv6_3;
+
+			entry->ipv6_5t_route.ipv6_dip0 = opt->ing_dipv6_0;
+			entry->ipv6_5t_route.ipv6_dip1 = opt->ing_dipv6_1;
+			entry->ipv6_5t_route.ipv6_dip2 = opt->ing_dipv6_2;
+			entry->ipv6_5t_route.ipv6_dip3 = opt->ing_dipv6_3;
+		}
+
+/*		pr_info("opt->ing_sipv6_0 = %x\n", opt->ing_sipv6_0);*/
+/*		pr_info("opt->ing_sipv6_1 = %x\n", opt->ing_sipv6_1);*/
+/*		pr_info("opt->ing_sipv6_2 = %x\n", opt->ing_sipv6_2);*/
+/*		pr_info("opt->ing_sipv6_3 = %x\n", opt->ing_sipv6_3);*/
+/*		pr_info("opt->ing_dipv6_0 = %x\n", opt->ing_dipv6_0);*/
+/*		pr_info("opt->ing_dipv6_1 = %x\n", opt->ing_dipv6_1);*/
+/*		pr_info("opt->ing_dipv6_2 = %x\n", opt->ing_dipv6_2);*/
+/*		pr_info("opt->ing_dipv6_3 = %x\n", opt->ing_dipv6_3);*/
+
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip0 = %x\n", entry->ipv6_5t_route.ipv6_sip0);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip1 = %x\n", entry->ipv6_5t_route.ipv6_sip1);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip2 = %x\n", entry->ipv6_5t_route.ipv6_sip2);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_sip3 = %x\n", entry->ipv6_5t_route.ipv6_sip3);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip0 = %x\n", entry->ipv6_5t_route.ipv6_dip0);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip1 = %x\n", entry->ipv6_5t_route.ipv6_dip1);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip2 = %x\n", entry->ipv6_5t_route.ipv6_dip2);*/
+/*		pr_info("entry->ipv6_5t_route.ipv6_dip3 = %x\n", entry->ipv6_5t_route.ipv6_dip3);*/
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		entry->ipv4_dslite.tunnel_sipv6_0 = opt->ing_sipv6_0;
+		entry->ipv4_dslite.tunnel_sipv6_1 = opt->ing_sipv6_1;
+		entry->ipv4_dslite.tunnel_sipv6_2 = opt->ing_sipv6_2;
+		entry->ipv4_dslite.tunnel_sipv6_3 = opt->ing_sipv6_3;
+		entry->ipv4_dslite.tunnel_dipv6_0 = opt->ing_dipv6_0;
+		entry->ipv4_dslite.tunnel_dipv6_1 = opt->ing_dipv6_1;
+		entry->ipv4_dslite.tunnel_dipv6_2 = opt->ing_dipv6_2;
+		entry->ipv4_dslite.tunnel_dipv6_3 = opt->ing_dipv6_3;
+		entry->ipv4_dslite.iblk2.mibf = 1;
+		entry->ipv4_dslite.priority = 0xf;
+		entry->ipv4_dslite.hop_limit = 120;
+		memcpy(entry->ipv4_dslite.flow_lbl, test.flow_lbl, 3);
+		/* IPv4 DS-Lite and IPv6 6RD shall be turned on by SW during initialization */
+		entry->bfib1.pkt_type = IPV4_DSLITE;
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		/* fill in ipv4 6rd entry */
+		entry->ipv6_6rd.tunnel_sipv4 = opt->ing_sipv4;
+		entry->ipv6_6rd.tunnel_dipv4 = opt->ing_dipv4;
+		//entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(&ppe_parse_result->iph);
+		entry->ipv6_6rd.hdr_chksum = opt->checksum;
+		entry->ipv6_6rd.flag = opt->frag;
+		entry->ipv6_6rd.ttl = opt->ttl;
+		entry->ipv6_6rd.dscp = 0;
+		entry->ipv6_6rd.iblk2.mibf = 1;
+
+		pr_info("PPE_HASH_SEED = %x\n", reg_read(PPE_HASH_SEED));
+		reg_modify_bits(PPE_HASH_SEED, 0, 0, 16);
+		entry->ipv6_6rd.per_flow_6rd_id = 1;
+		/* IPv4 DS-Lite and IPv6 6RD shall be turned on by SW during initialization */
+		entry->bfib1.pkt_type = IPV6_6RD;
+	}
+}
+
+void hw_nat_l4_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.dport = opt->ing_dp;
+		entry->ipv4_hnapt.sport = opt->ing_sp;
+		entry->ipv4_hnapt.new_dport = opt->eg_dp;
+		entry->ipv4_hnapt.new_sport = opt->eg_sp;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.dport = opt->ing_dp;
+			entry->ipv6_5t_route.sport = opt->ing_sp;
+		}
+	}
+}
+
+void hw_nat_ib1_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.bfib1.pkt_type = IPV4_NAPT;
+		entry->ipv4_hnapt.bfib1.sta = 1;
+		entry->ipv4_hnapt.bfib1.udp = opt->is_udp; /* tcp/udp */
+		entry->ipv4_hnapt.bfib1.state = BIND;
+		entry->ipv4_hnapt.bfib1.ka = 1; /* keepalive */
+		entry->ipv4_hnapt.bfib1.ttl = 0; /* TTL-1 */
+		entry->ipv4_hnapt.bfib1.psn = opt->pppoe_act; /* insert / remove */
+		entry->ipv4_hnapt.bfib1.vlan_layer = opt->vlan_layer;
+		entry->ipv4_hnapt.bfib1.time_stamp = reg_read(FOE_TS) & 0xFFFF;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.bfib1.pkt_type = IPV6_ROUTING;
+			entry->ipv6_5t_route.bfib1.sta = 1;
+			entry->ipv6_5t_route.bfib1.udp = opt->is_udp; /* tcp/udp */
+			entry->ipv6_5t_route.bfib1.state = BIND;
+			entry->ipv6_5t_route.bfib1.ka = 1; /* keepalive */
+			entry->ipv6_5t_route.bfib1.ttl = 0; /* TTL-1 */
+			entry->ipv6_5t_route.bfib1.psn = opt->pppoe_act; /* insert / remove */
+			entry->ipv6_5t_route.bfib1.vlan_layer = opt->vlan_layer;
+			entry->ipv6_5t_route.bfib1.time_stamp = reg_read(FOE_TS) & 0xFFFF;
+		}
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		entry->ipv4_dslite.bfib1.rmt = opt->rmt;
+		if (opt->rmt == 0)
+			entry->ipv4_dslite.bfib1.pkt_type = IPV4_DSLITE;
+
+		entry->ipv4_dslite.bfib1.sta = 1;
+		entry->ipv4_dslite.bfib1.udp = opt->is_udp; /* tcp/udp */
+		entry->ipv4_dslite.bfib1.state = BIND;
+		entry->ipv4_dslite.bfib1.ka = 1; /* keepalive */
+		entry->ipv4_dslite.bfib1.ttl = 0; /* TTL-1 */
+		entry->ipv4_dslite.bfib1.vlan_layer = opt->vlan_layer;
+		entry->ipv4_dslite.bfib1.time_stamp = reg_read(FOE_TS) & 0xFFFF;
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		entry->ipv6_6rd.bfib1.rmt = opt->rmt;
+		if (opt->rmt == 0)
+			entry->ipv6_6rd.bfib1.pkt_type = IPV6_6RD;
+
+		entry->ipv6_6rd.bfib1.sta = 1;
+		entry->ipv6_6rd.bfib1.udp = opt->is_udp; /* tcp/udp */
+		entry->ipv6_6rd.bfib1.state = BIND;
+		entry->ipv6_6rd.bfib1.ka = 1; /* keepalive */
+		entry->ipv6_6rd.bfib1.ttl = 0; /* TTL-1 */
+		entry->ipv6_6rd.bfib1.vlan_layer = opt->vlan_layer;
+		entry->ipv6_6rd.bfib1.time_stamp = reg_read(FOE_TS) & 0xFFFF;
+	}
+}
+
+void hw_nat_ib2_info(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		entry->ipv4_hnapt.iblk2.dp = opt->dst_port; /* 0:cpu, 1:GE1 */
+		entry->ipv4_hnapt.iblk2.dscp = opt->dscp;
+		entry->ipv4_hnapt.iblk2.acnt = opt->dst_port;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			entry->ipv6_5t_route.iblk2.dp = opt->dst_port; /* 0:cpu, 1:GE1 */
+			entry->ipv6_5t_route.iblk2.dscp = opt->dscp;
+			entry->ipv6_5t_route.iblk2.acnt = opt->dst_port;
+		}
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		entry->ipv4_dslite.iblk2.dp = opt->dst_port; /* 0:cpu, 1:GE1 */
+		entry->ipv4_dslite.iblk2.dscp = opt->dscp;
+		entry->ipv4_dslite.iblk2.acnt = opt->dst_port;
+	} else if ((opt->pkt_type) == IPV6_6RD) {
+		entry->ipv6_6rd.iblk2.dp = opt->dst_port; /* 0:cpu, 1:GE1 */
+		entry->ipv6_6rd.iblk2.dscp = opt->dscp;
+		entry->ipv6_6rd.iblk2.acnt = opt->dst_port;
+	}
+}
+
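+/*
+ * Semi-auto mode: the PPE pre-fills the entry and software completes the
+ * bind by refreshing the timestamp, restoring the TTL-regeneration and
+ * cache bits, and flipping the state to BIND (the sequence below).
+ */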
+void hw_nat_semi_bind(struct foe_entry *entry, struct hwnat_tuple *opt)
+{
+	u32 current_time;
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		/* Set Current time to time_stamp field in information block 1 */
+		current_time = reg_read(FOE_TS) & 0xFFFF;
+		entry->bfib1.time_stamp = (uint16_t)current_time;
+		/* IPv4: TTL / IPv6: Hop Limit field */
+		entry->ipv4_hnapt.bfib1.ttl = DFL_FOE_TTL_REGEN;
+		/* enable cache by default */
+		entry->ipv4_hnapt.bfib1.cah = 1;
+		/* Change Foe Entry State to Binding State */
+		entry->bfib1.state = BIND;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			/* Set Current time to time_stamp field in information block 1 */
+			current_time = reg_read(FOE_TS) & 0xFFFF;
+			entry->bfib1.time_stamp = (uint16_t)current_time;
+			/* IPv4: TTL / IPv6: Hop Limit field */
+			entry->ipv4_hnapt.bfib1.ttl = DFL_FOE_TTL_REGEN;
+			/* enable cache by default */
+			entry->ipv4_hnapt.bfib1.cah = 1;
+			/* Change Foe Entry State to Binding State */
+			entry->bfib1.state = BIND;
+		}
+	}
+}
+
+int set_done_bit_zero(struct foe_entry *foe_entry)
+{
+	if (IS_IPV4_HNAT(foe_entry) || IS_IPV4_HNAPT(foe_entry)) {
+		foe_entry->ipv4_hnapt.resv1 = 0;
+	} else if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV4_DSLITE(foe_entry)) {
+			//foe_entry->ipv4_dslite.resv1 = 0;
+		} else if (IS_IPV6_3T_ROUTE(foe_entry)) {
+			foe_entry->ipv6_3t_route.resv1 = 0;
+		} else if (IS_IPV6_5T_ROUTE(foe_entry)) {
+			foe_entry->ipv6_5t_route.resv1 = 0;
+		} else if (IS_IPV6_6RD(foe_entry)) {
+			foe_entry->ipv6_6rd.resv1 = 0;
+		} else {
+			pr_info("%s: unexpected packet format\n", __func__);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int get_entry_done_bit(struct foe_entry *foe_entry)
+{
+	int done_bit = 0;	/* default: not ready, for unhandled formats */
+
+	if (IS_IPV4_HNAT(foe_entry) || IS_IPV4_HNAPT(foe_entry))
+		done_bit = foe_entry->ipv4_hnapt.resv1;
+#if 0
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV4_DSLITE(foe_entry)) {
+			done_bit = foe_entry->ipv4_dslite.resv1;
+		} else if (IS_IPV6_3T_ROUTE(foe_entry)) {
+			done_bit = foe_entry->ipv6_3t_route.resv1;
+		} else if (IS_IPV6_5T_ROUTE(foe_entry)) {
+			done_bit = foe_entry->ipv6_5t_route.resv1;
+		} else if (IS_IPV6_6RD(foe_entry)) {
+			done_bit = foe_entry->ipv6_6rd.resv1;
+		} else {
+			pr_info("%s: unexpected packet format\n", __func__);
+			return -1;
+		}
+	}
+#endif
+	return done_bit;
+}
+
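+/*
+ * DVT helper: sprays test entries across the whole FOE table, bumping
+ * the ingress source IP for each slot (a design-verification aid,
+ * judging by the name; not used on the normal add path).
+ */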
+int foe_add_entry_dvt(struct hwnat_tuple *opt)
+{
+	struct foe_entry *entry = NULL;
+	u32 i;
+
+	pr_info("opt->hash_index = %d, FOE_4TB_SIZ = %d\n", opt->hash_index, FOE_4TB_SIZ);
+	for (i = 0; i < FOE_4TB_SIZ; i++) {
+		if (i <= 8191)
+			entry = &ppe_foe_base[i * 4];
+		else if (i <= 16383)
+			entry = &ppe_foe_base[((i * 4) % FOE_4TB_SIZ) + 1];
+		else if (i <= 24575)
+			entry = &ppe_foe_base[((i * 4) % FOE_4TB_SIZ) + 2];
+		else if (i <= 32767)
+			entry = &ppe_foe_base[((i * 4) % FOE_4TB_SIZ) + 3];
+
+		hw_nat_l2_info(entry, opt);
+		hw_nat_l3_info(entry, opt);
+		/* hw_nat_l4_info() only applies to NAPT/IPv6-routing entries */
+		if ((opt->pkt_type) != IPV4_DSLITE && (opt->pkt_type) != IPV6_6RD)
+			hw_nat_l4_info(entry, opt);
+
+		hw_nat_ib1_info(entry, opt);
+		hw_nat_ib2_info(entry, opt);
+		opt->ing_sipv4 = (opt->ing_sipv4) + 1;
+	}
+
+	return HWNAT_SUCCESS;
+}
+
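+/*
+ * Manual-mode add: the caller supplies the slot via opt->hash_index
+ * (the hash-based lookup is compiled out below). In semi-auto mode the
+ * "done bit" kept in resv1 (see get_entry_done_bit()) must read back as
+ * 1 before software may bind the entry.
+ */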
+int foe_add_entry(struct hwnat_tuple *opt)
+{
+	struct foe_pri_key key;
+	struct foe_entry *entry = NULL;
+	s32 hash_index;
+	int done_bit;
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		key.ipv4_hnapt.sip = opt->ing_sipv4;
+		key.ipv4_hnapt.dip = opt->ing_dipv4;
+		key.ipv4_hnapt.sport = opt->ing_sp;
+		key.ipv4_hnapt.dport = opt->ing_dp;
+		key.ipv4_hnapt.is_udp = opt->is_udp;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		key.ipv6_routing.sip0 = opt->ing_sipv6_0;
+		key.ipv6_routing.sip1 = opt->ing_sipv6_1;
+		key.ipv6_routing.sip2 = opt->ing_sipv6_2;
+		key.ipv6_routing.sip3 = opt->ing_sipv6_3;
+		key.ipv6_routing.dip0 = opt->ing_dipv6_0;
+		key.ipv6_routing.dip1 = opt->ing_dipv6_1;
+		key.ipv6_routing.dip2 = opt->ing_dipv6_2;
+		key.ipv6_routing.dip3 = opt->ing_dipv6_3;
+		key.ipv6_routing.sport = opt->ing_sp;
+		key.ipv6_routing.dport = opt->ing_dp;
+		key.ipv6_routing.is_udp = opt->is_udp;
+	} else if ((opt->pkt_type) == IPV4_DSLITE) {
+		key.ipv4_dslite.sip_v4 = opt->ing_sipv4;
+		key.ipv4_dslite.dip_v4 = opt->ing_dipv4;
+		key.ipv4_dslite.sip0_v6 = opt->ing_sipv6_0;
+		key.ipv4_dslite.sip1_v6 = opt->ing_sipv6_1;
+		key.ipv4_dslite.sip2_v6 = opt->ing_sipv6_2;
+		key.ipv4_dslite.sip3_v6 = opt->ing_sipv6_3;
+		key.ipv4_dslite.dip0_v6 = opt->ing_dipv6_0;
+		key.ipv4_dslite.dip1_v6 = opt->ing_dipv6_1;
+		key.ipv4_dslite.dip2_v6 = opt->ing_dipv6_2;
+		key.ipv4_dslite.dip3_v6 = opt->ing_dipv6_3;
+		key.ipv4_dslite.sport = opt->ing_sp;
+		key.ipv4_dslite.dport = opt->ing_dp;
+		key.ipv4_dslite.is_udp = opt->is_udp;
+	}
+
+	key.pkt_type = opt->pkt_type;
+#if 0
+	if (fe_feature & MANUAL_MODE)
+		hash_index = get_ppe_entry_idx(&key, entry, 0);
+	else
+		hash_index = get_ppe_entry_idx(&key, entry, 1);
+#endif
+
+	hash_index = opt->hash_index;
+	pr_info("opt->hash_index = %d\n", opt->hash_index);
+	if (hash_index != -1) {
+		entry = &ppe_foe_base[hash_index];
+		hw_nat_l2_info(entry, opt);
+		hw_nat_l3_info(entry, opt);
+		/* hw_nat_l4_info() only applies to NAPT/IPv6-routing entries */
+		if ((opt->pkt_type) != IPV4_DSLITE && (opt->pkt_type) != IPV6_6RD)
+			hw_nat_l4_info(entry, opt);
+
+		hw_nat_ib1_info(entry, opt);
+		hw_nat_ib2_info(entry, opt);
+		if (fe_feature & SEMI_AUTO_MODE) {
+			done_bit = get_entry_done_bit(entry);
+			if (done_bit == 1)
+				pr_info("mtk_entry_add number =%d\n", hash_index);
+			else if (done_bit == 0)
+				pr_info("ppe table not ready\n");
+			else
+				pr_info("%s: done_bit something wrong\n", __func__);
+
+			if (done_bit != 1)
+				return HWNAT_FAIL;
+			hw_nat_semi_bind(entry, opt);
+		}
+		foe_dump_entry(hash_index);
+		return HWNAT_SUCCESS;
+	}
+
+	return HWNAT_FAIL;
+}
+
+int foe_del_entry(struct hwnat_tuple *opt)
+{
+	struct foe_pri_key key;
+	s32 hash_index;
+	struct foe_entry *entry = NULL;
+	s32 rply_idx;
+	int done_bit;
+
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		key.ipv4_hnapt.sip = opt->ing_sipv4;
+		key.ipv4_hnapt.dip = opt->ing_dipv4;
+		key.ipv4_hnapt.sport = opt->ing_sp;
+		key.ipv4_hnapt.dport = opt->ing_dp;
+		/* key.ipv4_hnapt.is_udp=opt->is_udp; */
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		key.ipv6_routing.sip0 = opt->ing_sipv6_0;
+		key.ipv6_routing.sip1 = opt->ing_sipv6_1;
+		key.ipv6_routing.sip2 = opt->ing_sipv6_2;
+		key.ipv6_routing.sip3 = opt->ing_sipv6_3;
+		key.ipv6_routing.dip0 = opt->ing_dipv6_0;
+		key.ipv6_routing.dip1 = opt->ing_dipv6_1;
+		key.ipv6_routing.dip2 = opt->ing_dipv6_2;
+		key.ipv6_routing.dip3 = opt->ing_dipv6_3;
+		key.ipv6_routing.sport = opt->ing_sp;
+		key.ipv6_routing.dport = opt->ing_dp;
+		/* key.ipv6_routing.is_udp=opt->is_udp; */
+	}
+
+	key.pkt_type = opt->pkt_type;
+
+	/* find bind entry */
+	/* hash_index = FoeHashFun(&key,BIND); */
+	hash_index = get_ppe_entry_idx(&key, entry, 1);
+	if (hash_index != -1) {
+		opt->hash_index = hash_index;
+		rply_idx = reply_entry_idx(opt, hash_index);
+		if (fe_feature & SEMI_AUTO_MODE) {
+			entry =  &ppe_foe_base[hash_index];
+			done_bit = get_entry_done_bit(entry);
+			if (done_bit == 1) {
+				set_done_bit_zero(entry);
+			} else if (done_bit == 0) {
+				pr_info("%s : ppe table not ready\n", __func__);
+			} else {
+				pr_info("%s: done_bit something wrong\n", __func__);
+				set_done_bit_zero(entry);
+			}
+			if (done_bit != 1)
+				return HWNAT_FAIL;
+		}
+		foe_del_entry_by_num(hash_index);
+		pr_info("Clear Entry index = %d\n", hash_index);
+		if (rply_idx != -1) {
+			pr_info("Clear Entry index = %d\n", rply_idx);
+			foe_del_entry_by_num(rply_idx);
+		}
+
+		return HWNAT_SUCCESS;
+	}
+	pr_info("HWNAT ENTRY NOT FOUND\n");
+	return HWNAT_ENTRY_NOT_FOUND;
+}
+EXPORT_SYMBOL(foe_del_entry);
+
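+/*
+ * Parse the 5-tuple of an incoming frame into ppe_parse_rx_result.
+ * Returns 0 when the packet is a candidate for acceleration and 1 when
+ * it is not (fragments, GRE, unsupported formats, ...).
+ */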
+int get_five_tule(struct sk_buff *skb)
+{
+	struct ethhdr *eth = NULL;
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct tcphdr *th = NULL;
+	struct udphdr *uh = NULL;
+	u8 ipv6_head_len = 0;
+
+	memset(&ppe_parse_rx_result, 0, sizeof(ppe_parse_rx_result));
+	eth = (struct ethhdr *)skb->data;
+	ppe_parse_rx_result.eth_type = eth->h_proto;
+	/* set layer4 start addr */
+	if ((ppe_parse_rx_result.eth_type == htons(ETH_P_IP)) ||
+	    (ppe_parse_rx_result.eth_type == htons(ETH_P_PPP_SES) &&
+	    (ppe_parse_rx_result.ppp_tag == htons(PPP_IP)))) {
+		iph = (struct iphdr *)(skb->data + ETH_HLEN);
+		memcpy(&ppe_parse_rx_result.iph, iph, sizeof(struct iphdr));
+		if (iph->protocol == IPPROTO_TCP) {
+			skb_set_transport_header(skb, ETH_HLEN + (iph->ihl * 4));
+			th = (struct tcphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.th, th, sizeof(struct tcphdr));
+			ppe_parse_rx_result.pkt_type = IPV4_HNAPT;
+			if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+				if (debug_level >= 2)
+					DD;
+				return 1;
+			}
+		} else if (iph->protocol == IPPROTO_UDP) {
+			skb_set_transport_header(skb, ETH_HLEN + (iph->ihl * 4));
+			uh = (struct udphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.uh, uh, sizeof(struct udphdr));
+			ppe_parse_rx_result.pkt_type = IPV4_HNAPT;
+			if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+				if (USE_3T_UDP_FRAG == 0)
+					return 1;
+			}
+		} else if (iph->protocol == IPPROTO_GRE) {
+			/* GRE is not accelerated */
+			return 1;
+		}
+		if (fe_feature & HNAT_IPV6) {
+			if (iph->protocol == IPPROTO_IPV6) {
+				ip6h = (struct ipv6hdr *)((uint8_t *)iph + iph->ihl * 4);
+				memcpy(&ppe_parse_rx_result.ip6h, ip6h, sizeof(struct ipv6hdr));
+				if (ip6h->nexthdr == NEXTHDR_TCP) {
+					skb_set_transport_header(skb, ETH_HLEN + (sizeof(struct ipv6hdr)));
+					th = (struct tcphdr *)skb_transport_header(skb);
+					memcpy(&ppe_parse_rx_result.th.source, &th->source, sizeof(th->source));
+					memcpy(&ppe_parse_rx_result.th.dest, &th->dest, sizeof(th->dest));
+				} else if (ip6h->nexthdr == NEXTHDR_UDP) {
+					skb_set_transport_header(skb, ETH_HLEN + (sizeof(struct ipv6hdr)));
+					uh = (struct udphdr *)skb_transport_header(skb);
+					memcpy(&ppe_parse_rx_result.uh.source, &uh->source, sizeof(uh->source));
+					memcpy(&ppe_parse_rx_result.uh.dest, &uh->dest, sizeof(uh->dest));
+				}
+				ppe_parse_rx_result.pkt_type = IPV6_6RD;
+				/*
+				 * The identification field in the outer IPv4
+				 * header is zero after entering binding state,
+				 * and some 6RD relay routers drop such packets.
+				 */
+				if (hnat_chip_name & MT7621_HWNAT)
+					return 1;
+			}
+		}
+		/* other packet formats are not supported */
+		if ((iph->protocol != IPPROTO_TCP) && (iph->protocol != IPPROTO_UDP) &&
+		    (iph->protocol != IPPROTO_GRE) && (iph->protocol != IPPROTO_IPV6))
+			return 1;
+
+	} else if (ppe_parse_rx_result.eth_type == htons(ETH_P_IPV6) ||
+		   (ppe_parse_rx_result.eth_type == htons(ETH_P_PPP_SES) &&
+		    ppe_parse_rx_result.ppp_tag == htons(PPP_IPV6))) {
+		ip6h = (struct ipv6hdr *)skb_network_header(skb);
+		memcpy(&ppe_parse_rx_result.ip6h, ip6h, sizeof(struct ipv6hdr));
+		if (ip6h->nexthdr == NEXTHDR_TCP) {
+			skb_set_transport_header(skb, ETH_HLEN + (sizeof(struct ipv6hdr)));
+			th = (struct tcphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.th, th, sizeof(struct tcphdr));
+			ppe_parse_rx_result.pkt_type = IPV6_5T_ROUTE;
+		} else if (ip6h->nexthdr == NEXTHDR_UDP) {
+			skb_set_transport_header(skb, ETH_HLEN + (sizeof(struct ipv6hdr)));
+			uh = (struct udphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_rx_result.uh, uh, sizeof(struct udphdr));
+			ppe_parse_rx_result.pkt_type = IPV6_5T_ROUTE;
+		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			ipv6_head_len = sizeof(struct ipv6hdr);
+			/* the inner IPv4 header follows the IPv6 header */
+			memcpy(&ppe_parse_rx_result.iph, (uint8_t *)ip6h + ipv6_head_len,
+			       sizeof(struct iphdr));
+			ppe_parse_rx_result.pkt_type = IPV4_DSLITE;
+		} else {
+			ppe_parse_rx_result.pkt_type = IPV6_3T_ROUTE;
+		}
+
+	} else {
+		if (debug_level >= 2)
+			DD;
+		return 1;
+	}
+	return 0;
+}
+
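+/*
+ * Compare the parsed 5-tuple against the FOE entry at hash_index and,
+ * on a match with a bound entry, steer the skb to the matching GMAC and
+ * copy the entry's QoS queue id into skb->mark.
+ */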
+int decide_qid(u16 hash_index, struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+	u32 saddr;
+	u32 daddr;
+
+	u32 ppe_saddr;
+	u32 ppe_daddr;
+	u32 ppe_sport;
+	u32 ppe_dport;
+
+	u32 sport = 0;
+	u32 dport = 0;
+
+	u32 ipv6_sip_127_96;
+	u32 ipv6_sip_95_64;
+	u32 ipv6_sip_63_32;
+	u32 ipv6_sip_31_0;
+
+	u32 ipv6_dip_127_96;
+	u32 ipv6_dip_95_64;
+	u32 ipv6_dip_63_32;
+	u32 ipv6_dip_31_0;
+
+	u32 ppe_saddr_127_96;
+	u32 ppe_saddr_95_64;
+	u32 ppe_saddr_63_32;
+	u32 ppe_saddr_31_0;
+
+	u32 ppe_daddr_127_96;
+	u32 ppe_daddr_95_64;
+	u32 ppe_daddr_63_32;
+	u32 ppe_daddr_31_0;
+
+	u32 ppe_sportv6;
+	u32 ppe_dportv6;
+
+	entry = &ppe_foe_base[hash_index];
+	if (IS_IPV4_HNAPT(entry)) {
+		saddr = ntohl(ppe_parse_rx_result.iph.saddr);
+		daddr = ntohl(ppe_parse_rx_result.iph.daddr);
+		if (ppe_parse_rx_result.iph.protocol == IPPROTO_TCP) {
+			sport = ntohs(ppe_parse_rx_result.th.source);
+			dport = ntohs(ppe_parse_rx_result.th.dest);
+		} else if (ppe_parse_rx_result.iph.protocol == IPPROTO_UDP) {
+			sport = ntohs(ppe_parse_rx_result.uh.source);
+			dport = ntohs(ppe_parse_rx_result.uh.dest);
+		}
+		ppe_saddr = entry->ipv4_hnapt.sip;
+		ppe_daddr = entry->ipv4_hnapt.dip;
+		ppe_sport = entry->ipv4_hnapt.sport;
+		ppe_dport = entry->ipv4_hnapt.dport;
+		if (debug_level >= 2) {
+			pr_info("ppe_saddr = %x, ppe_daddr=%x, ppe_sport=%d, ppe_dport=%d, saddr=%x, daddr=%x, sport= %d, dport=%d\n",
+				ppe_saddr, ppe_daddr, ppe_sport, ppe_dport, saddr, daddr, sport, dport);
+		}
+		if ((saddr == ppe_saddr) && (daddr == ppe_daddr) &&
+		    (sport == ppe_sport) && (dport == ppe_dport) &&
+		    (entry->bfib1.state == BIND)) {
+			if (entry->ipv4_hnapt.iblk2.dp == 2) {
+				skb->dev = dst_port[DP_GMAC2];
+				if (debug_level >= 2)
+					pr_info("qid = %d\n", entry->ipv4_hnapt.iblk2.qid);
+				skb->mark = entry->ipv4_hnapt.iblk2.qid;
+			} else {
+				skb->dev = dst_port[DP_GMAC1];
+				if (debug_level >= 2)
+					pr_info("qid = %d\n", entry->ipv4_hnapt.iblk2.qid);
+				skb->mark = entry->ipv4_hnapt.iblk2.qid;
+			}
+			return 0;
+		} else {
+			return -1;
+		}
+	}
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_5T_ROUTE(entry)) {
+			ipv6_sip_127_96 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[0]);
+			ipv6_sip_95_64 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[1]);
+			ipv6_sip_63_32 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[2]);
+			ipv6_sip_31_0 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[3]);
+
+			ipv6_dip_127_96 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[0]);
+			ipv6_dip_95_64 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[1]);
+			ipv6_dip_63_32 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[2]);
+			ipv6_dip_31_0 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[3]);
+
+			ppe_saddr_127_96 = entry->ipv6_5t_route.ipv6_sip0;
+			ppe_saddr_95_64 = entry->ipv6_5t_route.ipv6_sip1;
+			ppe_saddr_63_32 = entry->ipv6_5t_route.ipv6_sip2;
+			ppe_saddr_31_0 = entry->ipv6_5t_route.ipv6_sip3;
+
+			ppe_daddr_127_96 = entry->ipv6_5t_route.ipv6_dip0;
+			ppe_daddr_95_64 = entry->ipv6_5t_route.ipv6_dip1;
+			ppe_daddr_63_32 = entry->ipv6_5t_route.ipv6_dip2;
+			ppe_daddr_31_0 = entry->ipv6_5t_route.ipv6_dip3;
+
+			ppe_sportv6 = entry->ipv6_5t_route.sport;
+			ppe_dportv6 = entry->ipv6_5t_route.dport;
+			/* match on the IPv6 next header, not the (unset) IPv4 protocol field */
+			if (ppe_parse_rx_result.ip6h.nexthdr == NEXTHDR_TCP) {
+				sport = ntohs(ppe_parse_rx_result.th.source);
+				dport = ntohs(ppe_parse_rx_result.th.dest);
+			} else if (ppe_parse_rx_result.ip6h.nexthdr == NEXTHDR_UDP) {
+				sport = ntohs(ppe_parse_rx_result.uh.source);
+				dport = ntohs(ppe_parse_rx_result.uh.dest);
+			}
+			if ((ipv6_sip_127_96 == ppe_saddr_127_96) && (ipv6_sip_95_64 == ppe_saddr_95_64) &&
+			    (ipv6_sip_63_32 == ppe_saddr_63_32) && (ipv6_sip_31_0 == ppe_saddr_31_0) &&
+			    (ipv6_dip_127_96 == ppe_daddr_127_96) && (ipv6_dip_95_64 == ppe_daddr_95_64) &&
+			    (ipv6_dip_63_32 == ppe_daddr_63_32) && (ipv6_dip_31_0 == ppe_daddr_31_0) &&
+			    (sport == ppe_sportv6) && (dport == ppe_dportv6) &&
+			    (entry->bfib1.state == BIND)) {
+				if (entry->ipv6_5t_route.iblk2.dp == 2) {
+					skb->dev = dst_port[DP_GMAC2];
+						/* if (entry->ipv6_3t_route.iblk2.qid >= 11) */
+					skb->mark = (entry->ipv6_3t_route.iblk2.qid);
+				} else {
+					skb->dev = dst_port[DP_GMAC1];
+					skb->mark = (entry->ipv6_3t_route.iblk2.qid);
+				}
+			} else {
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+void set_qid(struct sk_buff *skb)
+{
+	struct foe_pri_key key;
+	s32 hash_index;
+	struct foe_entry *entry = NULL;
+
+	get_five_tule(skb);
+	if (ppe_parse_rx_result.pkt_type == IPV4_HNAPT) {
+		key.ipv4_hnapt.sip = ntohl(ppe_parse_rx_result.iph.saddr);
+		key.ipv4_hnapt.dip = ntohl(ppe_parse_rx_result.iph.daddr);
+
+		if (ppe_parse_rx_result.iph.protocol == IPPROTO_TCP) {
+			key.ipv4_hnapt.sport = ntohs(ppe_parse_rx_result.th.source);
+			key.ipv4_hnapt.dport = ntohs(ppe_parse_rx_result.th.dest);
+		} else if (ppe_parse_rx_result.iph.protocol == IPPROTO_UDP) {
+			key.ipv4_hnapt.sport = ntohs(ppe_parse_rx_result.uh.source);
+			key.ipv4_hnapt.dport = ntohs(ppe_parse_rx_result.uh.dest);
+		}
+		/* key.ipv4_hnapt.is_udp=opt->is_udp; */
+	} else if (ppe_parse_rx_result.pkt_type == IPV6_5T_ROUTE) {
+		key.ipv6_routing.sip0 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[0]);
+		key.ipv6_routing.sip1 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[1]);
+		key.ipv6_routing.sip2 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[2]);
+		key.ipv6_routing.sip3 = ntohl(ppe_parse_rx_result.ip6h.saddr.s6_addr32[3]);
+		key.ipv6_routing.dip0 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[0]);
+		key.ipv6_routing.dip1 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[1]);
+		key.ipv6_routing.dip2 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[2]);
+		key.ipv6_routing.dip3 = ntohl(ppe_parse_rx_result.ip6h.daddr.s6_addr32[3]);
+		if (ppe_parse_rx_result.ip6h.nexthdr == IPPROTO_TCP) {
+			key.ipv6_routing.sport = ntohs(ppe_parse_rx_result.th.source);
+			key.ipv6_routing.dport = ntohs(ppe_parse_rx_result.th.dest);
+		} else if (ppe_parse_rx_result.ip6h.nexthdr == IPPROTO_UDP) {
+			key.ipv6_routing.sport = ntohs(ppe_parse_rx_result.uh.source);
+			key.ipv6_routing.dport = ntohs(ppe_parse_rx_result.uh.dest);
+		}
+	}
+
+	key.pkt_type = ppe_parse_rx_result.pkt_type;
+
+	/* find bind entry */
+	/* hash_index = FoeHashFun(&key,BIND); */
+	hash_index = get_ppe_entry_idx(&key, entry, 1);
+	if (hash_index != -1)
+		decide_qid(hash_index, skb);
+	if (debug_level >= 6)
+		pr_info("hash_index = %d\n", hash_index);
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/frame_engine.h b/src/kernel/modules/netsys_driver/nat/hw_nat/frame_engine.h
new file mode 100755
index 0000000..2fd40b5
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/frame_engine.h
@@ -0,0 +1,602 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _FE_WANTED
+#define _FE_WANTED
+
+#include <linux/version.h>
+#include <linux/ppp_defs.h>
+#include <linux/etherdevice.h>
+
+extern void __iomem *fe_base;
+extern void __iomem *med_base;
+extern void __iomem *netsys_base;
+
+#define MTK_FE_RANGE			(0x20000)
+#define FE_BASE				fe_base
+
+#if defined(CONFIG_HNAT_V1)
+#define MTK_FE_BASE			(0x15100000)
+#define MTK_ETHDMA_BASE			(0x15000000)
+#define MTK_MED_BASE			(0x15B38000)
+#define MED_BASE                	(med_base)
+#define MDMA_BASE			(fe_base + 0x4000)
+#define PPE_BASE                	(FE_BASE + 0xc00)
+#define PPE1_BASE   			(FE_BASE + 0x1000)
+#endif
+
+#if defined(CONFIG_HNAT_V2)
+#define MTK_FE_BASE			(0x15100000)
+#define MTK_ETHDMA_BASE			(0x15000000)
+#define MTK_MED_BASE			(0x15B38000)
+#define MED_BASE                	(med_base)
+#define MDMA_BASE			(FE_BASE + 0x5400)
+#define PPE_BASE                	(FE_BASE + 0x2000)
+#define PPE1_BASE			(FE_BASE + 0x2400)
+#endif
+
+#define WED_ACG			3
+#define MED_ACG			4
+#define ETH0_ACG		1
+#define ETH1_ACG		2
+#define NO_USE			(0x3f)
+
+/* these expand to printf() argument lists, so no enclosing parentheses */
+#define MAC_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], \
+		   ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
+
+#define IPV6_ADDR(x) ntohs((x)[0]), ntohs((x)[1]), ntohs((x)[2]), ntohs((x)[3]), \
+		     ntohs((x)[4]), ntohs((x)[5]), ntohs((x)[6]), ntohs((x)[7])
+
+#define IN
+#define OUT
+#define INOUT
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define NAT_DEBUG
+
+#ifdef NAT_DEBUG
+#define NAT_PRINT(fmt, args...) printk(fmt, ## args)
+#else
+#define NAT_PRINT(fmt, args...) do { } while (0)
+#endif
+
+
+#define FOE_TS		    (FE_BASE + 0x0010)
+#define MTK_WDMA_BASE       (FE_BASE + 0x2800)
+#define PSE_PPE0_DROP       (FE_BASE + 0x0110)
+#define PSE_PPE1_DROP       (FE_BASE + 0x0114)
+#define PPE_GLO_CFG	    (PPE_BASE + 0x200)
+#define PPE_FLOW_CFG	    (PPE_BASE + 0x204)
+#define PPE_FLOW_SET	    PPE_FLOW_CFG
+
+#define PPE_IP_PROT_CHK	    (PPE_BASE + 0x208)
+
+#define PPE_IP_PROT_0	    (PPE_BASE + 0x20C)
+#define PPE_IP_PROT_1	    (PPE_BASE + 0x210)
+#define PPE_IP_PROT_2	    (PPE_BASE + 0x214)
+#define PPE_IP_PROT_3	    (PPE_BASE + 0x218)
+#define PPE_TB_CFG	    (PPE_BASE + 0x21C)
+#define PPE_FOE_CFG	    PPE_TB_CFG
+#define PPE_TB_BASE	    (PPE_BASE + 0x220)
+#define PPE_FOE_BASE	    (PPE_TB_BASE)
+
+#define PPE_TB_USED	    (PPE_BASE + 0x224)
+#define PPE_BNDR	    (PPE_BASE + 0x228)
+#define PPE_FOE_BNDR	    PPE_BNDR
+#define PPE_BIND_LMT_0	    (PPE_BASE + 0x22C)
+#define PPE_FOE_LMT1	    (PPE_BIND_LMT_0)
+#define PPE_BIND_LMT_1	    (PPE_BASE + 0x230)
+#define PPE_FOE_LMT2	    PPE_BIND_LMT_1
+#define PPE_KA		    (PPE_BASE + 0x234)
+#define PPE_FOE_KA	    PPE_KA
+#define PPE_UNB_AGE	    (PPE_BASE + 0x238)
+#define PPE_FOE_UNB_AGE	    PPE_UNB_AGE
+#define PPE_BND_AGE_0	    (PPE_BASE + 0x23C)
+#define PPE_FOE_BND_AGE0    PPE_BND_AGE_0
+#define PPE_BND_AGE_1	    (PPE_BASE + 0x240)
+#define PPE_FOE_BND_AGE1    PPE_BND_AGE_1
+#define PPE_HASH_SEED	    (PPE_BASE + 0x244)
+
+#define PPE_MCAST_L_10       (PPE_BASE + 0x00)
+#define PPE_MCAST_H_10       (PPE_BASE + 0x04)
+
+#define PPE_DFT_CPORT       (PPE_BASE + 0x248)
+#define PPE_DFT_CPORT1      (PPE_BASE + 0x24C)
+#define PPE_MCAST_PPSE	    (PPE_BASE + 0x284)
+#define PPE_MCAST_L_0       (PPE_BASE + 0x288)
+#define PPE_MCAST_H_0       (PPE_BASE + 0x28C)
+#define PPE_MCAST_L_1       (PPE_BASE + 0x290)
+#define PPE_MCAST_H_1       (PPE_BASE + 0x294)
+#define PPE_MCAST_L_2       (PPE_BASE + 0x298)
+#define PPE_MCAST_H_2       (PPE_BASE + 0x29C)
+#define PPE_MCAST_L_3       (PPE_BASE + 0x2A0)
+#define PPE_MCAST_H_3       (PPE_BASE + 0x2A4)
+#define PPE_MCAST_L_4       (PPE_BASE + 0x2A8)
+#define PPE_MCAST_H_4       (PPE_BASE + 0x2AC)
+#define PPE_MCAST_L_5       (PPE_BASE + 0x2B0)
+#define PPE_MCAST_H_5       (PPE_BASE + 0x2B4)
+#define PPE_MCAST_L_6       (PPE_BASE + 0x2BC)
+#define PPE_MCAST_H_6       (PPE_BASE + 0x2C0)
+#define PPE_MCAST_L_7       (PPE_BASE + 0x2C4)
+#define PPE_MCAST_H_7       (PPE_BASE + 0x2C8)
+#define PPE_MCAST_L_8       (PPE_BASE + 0x2CC)
+#define PPE_MCAST_H_8       (PPE_BASE + 0x2D0)
+#define PPE_MCAST_L_9       (PPE_BASE + 0x2D4)
+#define PPE_MCAST_H_9       (PPE_BASE + 0x2D8)
+#define PPE_MCAST_L_A       (PPE_BASE + 0x2DC)
+#define PPE_MCAST_H_A       (PPE_BASE + 0x2E0)
+#define PPE_MCAST_L_B       (PPE_BASE + 0x2E4)
+#define PPE_MCAST_H_B       (PPE_BASE + 0x2E8)
+#define PPE_MCAST_L_C       (PPE_BASE + 0x2EC)
+#define PPE_MCAST_H_C       (PPE_BASE + 0x2F0)
+#define PPE_MCAST_L_D       (PPE_BASE + 0x2F4)
+#define PPE_MCAST_H_D       (PPE_BASE + 0x2F8)
+#define PPE_MCAST_L_E       (PPE_BASE + 0x2FC)
+#define PPE_MCAST_H_E       (PPE_BASE + 0x2E0)
+#define PPE_MCAST_L_F       (PPE_BASE + 0x300)
+#define PPE_MCAST_H_F       (PPE_BASE + 0x304)
+#define PPE_MTU_DRP         (PPE_BASE + 0x308)
+#define PPE_MTU_VLYR_0      (PPE_BASE + 0x30C)
+#define PPE_MTU_VLYR_1      (PPE_BASE + 0x310)
+#define PPE_MTU_VLYR_2      (PPE_BASE + 0x314)
+#define PPE_VPM_TPID        (PPE_BASE + 0x318)
+
+#define CAH_CTRL	    (PPE_BASE + 0x320)
+#define CAH_TAG_SRH         (PPE_BASE + 0x324)
+#define CAH_LINE_RW         (PPE_BASE + 0x328)
+#define CAH_WDATA           (PPE_BASE + 0x32C)
+#define CAH_RDATA           (PPE_BASE + 0x330)
+
+#define PPE_SBW_CTRL        (PPE_BASE + 0x374)
+
+#define PS_CFG	            (PPE_BASE + 0x400)
+#define PS_FBC		    (PPE_BASE + 0x404)
+#define PS_TB_BASE	    (PPE_BASE + 0x408)
+#define PS_TME_SMP	    (PPE_BASE + 0x40C)
+
+#define MIB_CFG		    (PPE_BASE + 0x334)
+#define MIB_TB_BASE	    (PPE_BASE + 0x338)
+#define MIB_SER_CR	    (PPE_BASE + 0x33C)
+#define MIB_SER_R0	    (PPE_BASE + 0x340)
+#define MIB_SER_R1	    (PPE_BASE + 0x344)
+#define MIB_SER_R2	    (PPE_BASE + 0x348)
+#define MIB_CAH_CTRL	    (PPE_BASE + 0x350)
+#define PPE_6RD_ID	    (PPE_BASE + 0x36c)
+
+#define MDMA_TX_BASE_PTR_0	(MDMA_BASE)	
+#define MDMA_TX_MAX_CNT_0	(MDMA_BASE + 0x4)
+#define MDMA_TX_CTX_IDX_0	(MDMA_BASE + 0x8)
+#define MDMA_TX_DTX_IDX_0	(MDMA_BASE + 0xc)
+#define MDMA_RX_BASE_PTR_0	(MDMA_BASE + 0x100)
+#define MDMA_RX_MAX_CNT_0	(MDMA_BASE + 0x104)
+#define MDMA_RX_CRX_IDX_0	(MDMA_BASE + 0x108)
+#define MDMA_RX_DRX_IDX_0	(MDMA_BASE + 0x10c)
+/* 46240 / 8 = 5450 */
+#define MED_INFO_SIZE			(5450)
+/* MED_HNAT_INFO_HOST_START_PTR (40b) */
+#define MEDHW_SSR1_DST_RB0_BASE		(MED_BASE + 0x80)
+#define MEDHW_SSR1_DST_RB0_BASE_HI	(MED_BASE + 0x84)
+
+/* MED_HNAT_INFO_HOST_CNT (18b) */
+#define MEDHW_SSR1_DST_RB0_SIZE		(MED_BASE + 0x88)
+
+/* MED_HNAT_INFO_HOST_WIDX (18b) */
+#define MEDHW_SSR1_DST_RB0_WIDX		(MED_BASE + 0x90)
+
+/* MED_HNAT_INFO_HOST_RIDX (18b) */
+#define MEDHW_SSR1_DST_RB0_RIDX		(MED_BASE + 0x94)
+#define MEDHW_SSR1_DST_RB0_REMAIN	(MED_BASE + 0x98)
+#define MEDHW_SSR1_DST_RB0_OCCUPY	(MED_BASE + 0x9c)
+
+#define MEDHW_SSR1_DST_RB0_CFG		(MED_BASE + 0xa0)
+#define MEDHW_SSR1_DST_RB0_STS		(MED_BASE + 0xa4)
+/* MED_HNAT_INFO_HOST_ADD_ENTRY_CNT (18b) */
+#define MEDHW_SSR1_DST_RB0_INC		(MED_BASE + 0xb0)
+#define MEDHW_SSR1_DST_RB0_DEC		(MED_BASE + 0xb4)
+
+/* MED_HNAT_INFO_HOST_START_IDX (18b) */
+#define MEDHW_SSR1_DST_RB0_RSTR		(MED_BASE + 0xb8)
+
+/*
+ * CAH_RDATA[17:16]: 0: invalid, 1: valid, 2: dirty, 3: lock
+ * CAH_RDATA[15:0]:  entry number
+ */
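+
+/*
+ * Small decode helpers for the CAH_RDATA layout documented above (these
+ * inlines are illustrative additions, not part of the hardware register
+ * description):
+ */
+static inline u32 cah_rdata_state(u32 rdata)
+{
+	return (rdata >> 16) & 0x3;	/* 0:invalid 1:valid 2:dirty 3:lock */
+}
+
+static inline u32 cah_rdata_entry(u32 rdata)
+{
+	return rdata & 0xffff;		/* cached FOE entry number */
+}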
+/* TO PPE */
+#define IPV4_PPE_MYUC	    BIT(0) /* my mac */
+#define IPV4_PPE_MC	    BIT(1) /* multicast */
+#define IPV4_PPE_IPM	    BIT(2) /* ip multicast */
+#define IPV4_PPE_BC	    BIT(3) /* broadcast */
+#define IPV4_PPE_UC	    BIT(4) /* ipv4 learned UC frame */
+#define IPV4_PPE_UN	    BIT(5) /* ipv4 unknown  UC frame */
+
+#define IPV6_PPE_MYUC	    BIT(8) /* my mac */
+#define IPV6_PPE_MC	    BIT(9) /* multicast */
+#define IPV6_PPE_IPM	    BIT(10) /* ipv6 multicast */
+#define IPV6_PPE_BC	    BIT(11) /* broadcast */
+#define IPV6_PPE_UC	    BIT(12) /* ipv6 learned UC frame */
+#define IPV6_PPE_UN	    BIT(13) /* ipv6 unknown  UC frame */
+
+#define AC_BASE		    (FE_BASE + 0x2000)
+#define METER_BASE	    (FE_BASE + 0x2000)
+
+#define FE_GDMA1_FWD_CFG    (FE_BASE + 0x500)
+#define FE_GDMA2_FWD_CFG    (FE_BASE + 0x1500)
+
+/* GDMA1 My MAC unicast frame destination port */
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define GDM1_UFRC_P_CPU     (5 << 12)
+#else
+#define GDM1_UFRC_P_CPU     (0 << 12)
+#endif
+
+
+/* GDMA1 broadcast frame MAC address destination port */
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define GDM1_BFRC_P_CPU     (5 << 8)
+#else
+#define GDM1_BFRC_P_CPU     (0 << 8)
+#endif
+
+
+/* GDMA1 multicast frame MAC address destination port */
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define GDM1_MFRC_P_CPU     (5 << 4)
+#else
+#define GDM1_MFRC_P_CPU     (0 << 4)
+#endif
+
+
+/* GDMA1 other MAC address frame destination port */
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define GDM1_OFRC_P_CPU     (5 << 0)
+#else
+#define GDM1_OFRC_P_CPU     (0 << 0)
+#endif
+
+#define	GDMA0_PSE_PORT       1
+#define	GDMA1_PSE_PORT       2
+#define	QDMA_PSE_PORT        5
+#define	WDMA0_PSE_PORT       8
+#define	WDMA1_PSE_PORT       9
+#define	MDMA_PSE_PORT        10
+#define	EDMA0_PSE_PORT       11
+#define	EDMA1_PSE_PORT       12
+#define	ADMA_PSE_PORT        0
+
+#if !defined(CONFIG_ARCH_COLGIN)
+#define GDM1_UFRC_P_PPE     (4 << 12)
+#define GDM1_BFRC_P_PPE     (4 << 8)
+#define GDM1_MFRC_P_PPE     (4 << 4)
+#define GDM1_OFRC_P_PPE     (4 << 0)
+#else
+#define GDM1_UFRC_P_PPE     (3 << 12)
+#define GDM1_BFRC_P_PPE     (3 << 8)
+#define GDM1_MFRC_P_PPE     (3 << 4)
+#define GDM1_OFRC_P_PPE     (3 << 0)
+#define GDM1_UFRC_P_PPE1     (4 << 12)
+#define GDM1_BFRC_P_PPE1     (4 << 8)
+#define GDM1_MFRC_P_PPE1     (4 << 4)
+#define GDM1_OFRC_P_PPE1     (4 << 0)
+#endif
+
+enum FOE_SMA {
+	DROP = 0,		/* Drop the packet */
+	DROP2 = 1,		/* Drop the packet */
+	ONLY_FWD_CPU = 2,	/* Only Forward to CPU */
+	FWD_CPU_BUILD_ENTRY = 3	/* Forward to CPU and build new FOE entry */
+};
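+
+/*
+ * SMA selects what the PPE does with packets that have no bound FOE entry
+ * yet.  A hedged programming sketch (the bit offset 4 / width 2 follows
+ * other MediaTek HNAT drivers and is an assumption here):
+ *
+ *	reg_modify_bits(PPE_TB_CFG, FWD_CPU_BUILD_ENTRY, 4, 2);
+ */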
+
+enum BIND_DIR {
+	UPSTREAM_ONLY = 0,	/* only speed up upstream flow */
+	DOWNSTREAM_ONLY = 1,	/* only speed up downstream flow */
+	BIDIRECTION = 2		/* speed up bi-direction flow */
+};
+
+/* PPE_GLO_CFG, Offset=0x200 */
+#define DFL_TTL0_DRP		(0)	/* 1:Drop, 0: Alert CPU */
+/* PPE Flow Set */
+#define BIT_FBC_FOE		BIT(0)	/* PPE engine for broadcast flow */
+#define BIT_FMC_FOE		BIT(1)	/* PPE engine for multicast flow */
+#define BIT_FUC_FOE		BIT(2)	/* PPE engine for unicast flow */
+#define BIT_UDP_IP4F_NAT_EN	BIT(7)  /*Enable IPv4 fragment + UDP packet NAT*/
+#define BIT_IPV6_3T_ROUTE_EN	BIT(8)	/* IPv6 3-tuple route */
+#define BIT_IPV6_5T_ROUTE_EN	BIT(9)	/* IPv6 5-tuple route */
+#define BIT_IPV6_6RD_EN		BIT(10)	/* IPv6 6RD */
+#define BIT_IPV4_464XLAT_EN	BIT(11)	/* IPv4 464XLAT */
+#define BIT_IPV4_NAT_EN		BIT(12)	/* IPv4 NAT */
+#define BIT_IPV4_NAPT_EN	BIT(13)	/* IPv4 NAPT */
+#define BIT_IPV4_DSL_EN		BIT(14)	/* IPv4 DS-Lite */
+#define BIT_IP_PROT_CHK_BLIST	BIT(16)	/* IP protocol check is black/white list */
+#define BIT_IPV4_NAT_FRAG_EN	BIT(17)	/* Enable fragment support for IPv4 NAT flow */
+/* For IPv6 5-tuple and 6RD flow, hash on flow label instead of sport/dport */
+#define BIT_IPV6_HASH_FLAB	BIT(18)
+#define BIT_IPV4_HASH_GREK	BIT(19)	/* For IPv4 NAT, adding GRE key into HASH */
+#define BIT_IPV6_HASH_GREK	BIT(20)	/* For IPv6 3-tuple, adding GRE key into HASH */
+#define BIT_IPV4_MAPE_EN	BIT(21)	/*MAPE*/
+#define BIT_IPV4_MAPT_EN	BIT(22)	/*MAPT*/
+
+#define IS_IPV6_FLAB_EBL()	((reg_read(PPE_FLOW_SET) & BIT_IPV6_HASH_FLAB) ? 1 : 0)
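+
+/*
+ * Illustrative bring-up write (a sketch, not this driver's actual init
+ * sequence): the BIT_* flags above are OR-ed into PPE_FLOW_CFG to select
+ * which flow types the PPE accelerates:
+ *
+ *	reg_write(PPE_FLOW_CFG, reg_read(PPE_FLOW_CFG) |
+ *		  BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN | BIT_IPV6_5T_ROUTE_EN);
+ */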
+
+/* PPE FOE Bind Rate*/
+/* packet in a time stamp unit */
+#define DFL_FOE_BNDR		30
+/* config RA_HW_NAT_PBND_RD_LMT */
+/*	int "max retry count" */
+/*	default 10 */
+/*	depends on RA_HW_NAT_PREBIND */
+#define DFL_PBND_RD_LMT		10
+/* config RA_HW_NAT_PBND_RD_PRD */
+/*	int "check interval in pause state (us) Max:65535" */
+/*	default 1000 */
+/*	depends on RA_HW_NAT_PREBIND */
+#define DFL_PBND_RD_PRD		1000
+
+/* PPE_FOE_LMT */
+/* smaller than 1/4 of total entries */
+#define DFL_FOE_QURT_LMT	16383 /* CONFIG_RA_HW_NAT_QURT_LMT */
+
+/* between 1/2 and 1/4 of total entries */
+#define DFL_FOE_HALF_LMT	16383 /* CONFIG_RA_HW_NAT_HALF_LMT */
+
+/* between full and 1/2 of total entries */
+#define DFL_FOE_FULL_LMT	32767 /* CONFIG_RA_HW_NAT_FULL_LMT */
+
+/* PPE_FOE_KA*/
+/* visit a FOE entry every FOE_KA_T * 1 msec */
+#define DFL_FOE_KA_T		1
+
+#if defined(CONFIG_RA_HW_NAT_TBL_1K)
+/* FOE_TCP_KA * FOE_KA_T * FOE_4TB_SIZ */
+/*TCP KeepAlive Interval(Unit:1Sec)*/
+#define DFL_FOE_TCP_KA		5
+/* FOE_UDP_KA * FOE_KA_T * FOE_4TB_SIZ */
+/*UDP KeepAlive Interval(Unit:1Sec)*/
+#define DFL_FOE_UDP_KA		5
+/* FOE_NTU_KA * FOE_KA_T * FOE_4TB_SIZ */
+/*Non-TCP/UDP KeepAlive Interval(Unit:1Sec)*/
+#define DFL_FOE_NTU_KA		5
+#elif defined(CONFIG_RA_HW_NAT_TBL_2K)
+/*(Unit:2Sec)*/
+#define DFL_FOE_TCP_KA		3
+#define DFL_FOE_UDP_KA		3
+#define DFL_FOE_NTU_KA		3
+#elif defined(CONFIG_RA_HW_NAT_TBL_4K)
+/*(Unit:4Sec)*/
+#define DFL_FOE_TCP_KA		1
+#define DFL_FOE_UDP_KA		1
+#define DFL_FOE_NTU_KA		1
+#elif defined(CONFIG_RA_HW_NAT_TBL_8K)
+/*(Unit:8Sec)*/
+#define DFL_FOE_TCP_KA		1
+#define DFL_FOE_UDP_KA		1
+#define DFL_FOE_NTU_KA		1
+#elif defined(CONFIG_RA_HW_NAT_TBL_16K)
+/*(Unit:16Sec)*/
+#define DFL_FOE_TCP_KA		1
+#define DFL_FOE_UDP_KA		1
+#define DFL_FOE_NTU_KA		1
+#elif defined(CONFIG_RA_HW_NAT_TBL_32K)
+/*(Unit:32Sec)*/
+#define DFL_FOE_TCP_KA		1
+#define DFL_FOE_UDP_KA		1
+#define DFL_FOE_NTU_KA		1
+#endif
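+
+/*
+ * Worked example for the "(Unit:...)" notes above: the PPE visits one FOE
+ * entry per FOE_KA_T tick (1 ms), so a full pass over a 32K table takes
+ * about 32768 * 1 ms ~= 32.8 s; with DFL_FOE_TCP_KA = 1 that is also the
+ * effective TCP keep-alive interval.
+ */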
+
+/*PPE_FOE_CFG*/
+#if defined(CONFIG_RA_HW_NAT_HASH0)
+#define DFL_FOE_HASH_MODE	0
+#elif defined(CONFIG_RA_HW_NAT_HASH1)
+#define DFL_FOE_HASH_MODE	1
+#elif defined(CONFIG_RA_HW_NAT_HASH2)
+#define DFL_FOE_HASH_MODE	2
+#elif defined(CONFIG_RA_HW_NAT_HASH3)
+#define DFL_FOE_HASH_MODE	3
+#elif defined(CONFIG_RA_HW_NAT_HASH_DBG)
+#define DFL_FOE_HASH_MODE	0 /* don't care */
+#endif
+
+#define HASH_SEED		0x12345678
+#define DFL_FOE_UNB_AGE		1	/* Unbind state age enable */
+#define DFL_FOE_TCP_AGE		1	/* Bind TCP age enable */
+#define DFL_FOE_NTU_AGE		1	/* Bind non-TCP/UDP age enable */
+#define DFL_FOE_UDP_AGE		1	/* Bind UDP age enable */
+#define DFL_FOE_FIN_AGE		1	/* Bind TCP FIN age enable */
+#define DFL_FOE_KA		3	/* 0: disable, 1: unicast old, 2: multicast new, 3: duplicate old */
+
+/* PPE_FOE_UNB_AGE */
+/* Minimum packet-count threshold for aging out an entry in unbind state: */
+/* an unbind flow whose packet count < this threshold and whose idle time */
+/* > its life time is aged out. */
+/* [Note: idle time = current time - last packet receive time] (pkt count) */
+#define DFL_FOE_UNB_MNP		1000
+/* Delta time for aging out an unbind FOE entry (Unit:1Sec) */
+#define DFL_FOE_UNB_DLTA	3
+/* Delta time for aging out a bound non-TCP/UDP FOE entry */
+#define DFL_FOE_NTU_DLTA	5
+
+/* PPE_FOE_BND_AGE0 */
+/* Delta time for aging out a bound UDP FOE entry (Unit:1Sec) */
+#define DFL_FOE_UDP_DLTA	5
+
+/* PPE_FOE_BND_AGE1 */
+/* Delta time for aging out a bound TCP FIN entry */
+#define DFL_FOE_FIN_DLTA	5
+/* Delta time for aging out a bound TCP entry (Unit:1Sec) */
+#define DFL_FOE_TCP_DLTA	5
+
+#define DFL_FOE_TTL_REGEN	1	/* TTL = TTL -1 */
+
+#define PPE1_GLO_CFG	    (PPE1_BASE + 0x200)
+#define PPE1_FLOW_CFG	    (PPE1_BASE + 0x204)
+#define PPE1_FLOW_SET	    PPE1_FLOW_CFG
+
+#define PPE1_IP_PROT_CHK    (PPE1_BASE + 0x208)
+#define PPE1_IP_PROT_0	    (PPE1_BASE + 0x20C)
+#define PPE1_IP_PROT_1	    (PPE1_BASE + 0x210)
+#define PPE1_IP_PROT_2	    (PPE1_BASE + 0x214)
+#define PPE1_IP_PROT_3	    (PPE1_BASE + 0x218)
+#define PPE1_TB_CFG	    (PPE1_BASE + 0x21C)
+#define PPE1_FOE_CFG	    PPE1_TB_CFG
+#define PPE1_TB_BASE	    (PPE1_BASE + 0x220)
+#define PPE1_FOE_BASE	    (PPE1_TB_BASE)
+#define PPE1_TB_USED	    (PPE1_BASE + 0x224)
+#define PPE1_BNDR	    (PPE1_BASE + 0x228)
+#define PPE1_FOE_BNDR	    PPE1_BNDR
+#define PPE1_BIND_LMT_0	    (PPE1_BASE + 0x22C)
+#define PPE1_FOE_LMT1	    (PPE1_BIND_LMT_0)
+#define PPE1_BIND_LMT_1	    (PPE1_BASE + 0x230)
+#define PPE1_FOE_LMT2	    PPE1_BIND_LMT_1
+#define PPE1_KA		    (PPE1_BASE + 0x234)
+#define PPE1_FOE_KA	    PPE1_KA
+#define PPE1_UNB_AGE	    (PPE1_BASE + 0x238)
+#define PPE1_FOE_UNB_AGE	    PPE1_UNB_AGE
+#define PPE1_BND_AGE_0	    (PPE1_BASE + 0x23C)
+#define PPE1_FOE_BND_AGE0    PPE1_BND_AGE_0
+#define PPE1_BND_AGE_1	    (PPE1_BASE + 0x240)
+#define PPE1_FOE_BND_AGE1    PPE1_BND_AGE_1
+#define PPE1_HASH_SEED	    (PPE1_BASE + 0x244)
+
+#define PPE1_MCAST_L_10       (PPE1_BASE + 0x00)
+#define PPE1_MCAST_H_10       (PPE1_BASE + 0x04)
+
+#define PPE1_DFT_CPORT       (PPE1_BASE + 0x248)
+#define PPE1_DFT_CPORT1      (PPE1_BASE + 0x24c)
+#define PPE1_MCAST_PPSE	     (PPE1_BASE + 0x284)
+#define PPE1_MCAST_L_0       (PPE1_BASE + 0x288)
+#define PPE1_MCAST_H_0       (PPE1_BASE + 0x28C)
+#define PPE1_MCAST_L_1       (PPE1_BASE + 0x290)
+#define PPE1_MCAST_H_1       (PPE1_BASE + 0x294)
+#define PPE1_MCAST_L_2       (PPE1_BASE + 0x298)
+#define PPE1_MCAST_H_2       (PPE1_BASE + 0x29C)
+#define PPE1_MCAST_L_3       (PPE1_BASE + 0x2A0)
+#define PPE1_MCAST_H_3       (PPE1_BASE + 0x2A4)
+#define PPE1_MCAST_L_4       (PPE1_BASE + 0x2A8)
+#define PPE1_MCAST_H_4       (PPE1_BASE + 0x2AC)
+#define PPE1_MCAST_L_5       (PPE1_BASE + 0x2B0)
+#define PPE1_MCAST_H_5       (PPE1_BASE + 0x2B4)
+#define PPE1_MCAST_L_6       (PPE1_BASE + 0x2BC)
+#define PPE1_MCAST_H_6       (PPE1_BASE + 0x2C0)
+#define PPE1_MCAST_L_7       (PPE1_BASE + 0x2C4)
+#define PPE1_MCAST_H_7       (PPE1_BASE + 0x2C8)
+#define PPE1_MCAST_L_8       (PPE1_BASE + 0x2CC)
+#define PPE1_MCAST_H_8       (PPE1_BASE + 0x2D0)
+#define PPE1_MCAST_L_9       (PPE1_BASE + 0x2D4)
+#define PPE1_MCAST_H_9       (PPE1_BASE + 0x2D8)
+#define PPE1_MCAST_L_A       (PPE1_BASE + 0x2DC)
+#define PPE1_MCAST_H_A       (PPE1_BASE + 0x2E0)
+#define PPE1_MCAST_L_B       (PPE1_BASE + 0x2E4)
+#define PPE1_MCAST_H_B       (PPE1_BASE + 0x2E8)
+#define PPE1_MCAST_L_C       (PPE1_BASE + 0x2EC)
+#define PPE1_MCAST_H_C       (PPE1_BASE + 0x2F0)
+#define PPE1_MCAST_L_D       (PPE1_BASE + 0x2F4)
+#define PPE1_MCAST_H_D       (PPE1_BASE + 0x2F8)
+#define PPE1_MCAST_L_E       (PPE1_BASE + 0x2FC)
+#define PPE1_MCAST_H_E       (PPE1_BASE + 0x2E0)
+#define PPE1_MCAST_L_F       (PPE1_BASE + 0x300)
+#define PPE1_MCAST_H_F       (PPE1_BASE + 0x304)
+#define PPE1_MTU_DRP         (PPE1_BASE + 0x308)
+#define PPE1_MTU_VLYR_0      (PPE1_BASE + 0x30C)
+#define PPE1_MTU_VLYR_1      (PPE1_BASE + 0x310)
+#define PPE1_MTU_VLYR_2      (PPE1_BASE + 0x314)
+#define PPE1_VPM_TPID        (PPE1_BASE + 0x318)
+
+#define CAH_CTRL_PPE1	    (PPE1_BASE + 0x320)
+#define CAH_TAG_SRH_PPE1    (PPE1_BASE + 0x324)
+#define CAH_LINE_RW_PPE1    (PPE1_BASE + 0x328)
+#define CAH_WDATA_PPE1      (PPE1_BASE + 0x32C)
+#define CAH_RDATA_PPE1      (PPE1_BASE + 0x330)
+
+#define MIB_CFG_PPE1	    (PPE1_BASE + 0x334)
+#define MIB_TB_BASE_PPE1    (PPE1_BASE + 0x338)
+#define MIB_SER_CR_PPE1	    (PPE1_BASE + 0x33C)
+#define MIB_SER_R0_PPE1	    (PPE1_BASE + 0x340)
+#define MIB_SER_R1_PPE1	    (PPE1_BASE + 0x344)
+#define MIB_SER_R2_PPE1	    (PPE1_BASE + 0x348)
+#define MIB_CAH_CTRL_PPE1   (PPE1_BASE + 0x350)
+#define PPE1_6RD_ID	    (PPE1_BASE + 0x36c)
+#define PPE1_SBW_CTRL       (PPE1_BASE + 0x374)
+
+#define NETSYS_DVFS_CFG0		(netsys_base + 0xCC)
+#define NETSYS_DVFS_EN			BIT(0)
+
+#define NETSYS_DVFS_CFG1		(netsys_base + 0xD0)
+#define NETSYS_SW_VC_DVFS_EN		BIT(16)
+#define NETSYS_SW_VC_DVFS_REQ		BIT(17)
+#define NETSYS_SW_VC_DVFS_ACK		BIT(19)
+#define NETSYS_SW_VC_DVFS_VAL_OFFSET	20
+
+#define NETSYS_SW_BW_DVFS_EN		BIT(24)
+#define NETSYS_SW_BW_DVFS_REQ		BIT(25)
+#define NETSYS_SW_BW_DVFS_ACK		BIT(27)
+#define NETSYS_SW_BW_DVFS_VAL_OFFSET	28
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_common.c b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_common.c
new file mode 100755
index 0000000..dee85c1
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_common.c
@@ -0,0 +1,4981 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <net/rtnetlink.h>
+#include <net/netevent.h>
+#include <linux/platform_device.h>
+#include "ra_nat.h"
+#include "foe_fdb.h"
+#include "frame_engine.h"
+#include "util.h"
+#include "hnat_ioctl.h"
+#include "hnat_define.h"
+#include "hnat_config.h"
+#include "hnat_dbg_proc.h"
+#include "mcast_tbl.h"
+#include "hnat_common.h"
+
+unsigned int dbg_cpu_reason_cnt[32];
+EXPORT_SYMBOL(dbg_cpu_reason_cnt);
+int hwnat_dbg_entry;
+EXPORT_SYMBOL(hwnat_dbg_entry);
+unsigned int SwitchDslMape;
+EXPORT_SYMBOL(SwitchDslMape);
+int get_brlan;
+u32 br_netmask;
+u32 br0_ip;
+char br0_mac_address[6];
+u32 ppe_sw_fast;
+u32 ppe_hw_fast;
+u8 set_fqos = 0;
+u8 xlat_enable = 1;
+u32 rndis_bind_count = 0;
+u32 rndis_mod = 2;
+
+static const char *const mtk_hnat_feature_name[] = {
+	"GE2_SUPPORT", "HNAT_IPV6", "HNAT_VLAN_TX", "HNAT_MCAST", "HNAT_QDMA", "WARP_WHNAT", "WIFI_HNAT", "HNAT_WAN_P4", "WAN_TO_WLAN_QOS", "HNAT_SP_TAG",
+	"QDMA_TX_RX", "PPE_MIB", "PACKET_SAMPLING", "HNAT_OPENWRT", "HNAT_WLAN_QOS", "WLAN_OPTIMIZE", "UDP_FRAG", "AUTO_MODE", "SEMI_AUTO_MODE", "MANUAL_MODE",
+	"PRE_BIND", "HNAT_IPI", "DBG_IPV6_SIP", "DBG_IPV4_SIP", "DBG_SP", "ETH_QOS"
+};
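+
+/*
+ * fe_feature is used as a bitmask elsewhere in this file (e.g. the
+ * "fe_feature & WARP_WHNAT" tests below).  A sketch of dumping the active
+ * features, assuming name index i corresponds to feature bit i:
+ *
+ *	int i;
+ *
+ *	for (i = 0; i < ARRAY_SIZE(mtk_hnat_feature_name); i++)
+ *		if (fe_feature & BIT(i))
+ *			pr_notice("hnat feature: %s\n", mtk_hnat_feature_name[i]);
+ */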
+
+u8 USE_3T_UDP_FRAG;
+EXPORT_SYMBOL(USE_3T_UDP_FRAG);
+struct foe_entry *ppe_foe_base;
+
+struct foe_entry *ppe1_foe_base;
+EXPORT_SYMBOL(ppe1_foe_base);
+struct MED_HNAT_INFO_HOST *med_info_base;
+EXPORT_SYMBOL(med_info_base);
+struct mib_entry *ppe1_mib_base;
+dma_addr_t ppe1_phy_mib_base;
+dma_addr_t ppe1_phy_foe_base;
+
+struct mib_entry *ppe_mib_base;
+dma_addr_t ppe_phy_mib_base;
+
+EXPORT_SYMBOL(ppe_foe_base);
+dma_addr_t ppe_phy_foe_base;
+struct ps_entry *ppe_ps_base;
+dma_addr_t ppe_phy_ps_base;
+
+
+struct hwnat_ac_args ac_info[64];	/* 1 for LAN, 2 for WAN */
+EXPORT_SYMBOL(ac_info);
+
+int DP_GMAC1;
+int DP_GMAC2;
+int DP_EDMA0;
+int DP_EDMA1;
+int DP_CCMNI0 = MAX_IF_NUM;
+
+#ifdef CONFIG_RAETH_EDMA
+struct net_device *aqr_dev1;
+struct net_device *aqr_dev2;
+#endif
+
+/* #define DSCP_REMARK_TEST */
+/* #define PREBIND_TEST */
+#define DD \
+{\
+pr_notice("%s %d\n", __func__, __LINE__); \
+}
+
+/*HWNAT IPI*/
+/*unsigned int ipidbg[NR_CPUS][10];*/
+/*unsigned int ipidbg2[NR_CPUS][10];*/
+/*extern int32_t HnatIPIExtIfHandler(struct sk_buff * skb);*/
+/*extern int32_t HnatIPIForceCPU(struct sk_buff * skb);*/
+/*extern int HnatIPIInit();*/
+/*extern int HnatIPIDeInit();*/
+#if (0)
+void skb_dump(struct sk_buff *sk)
+{
+	unsigned char *p;
+
+	pr_notice("\nskb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
+		  sk->dev ? sk->dev->name : "ip stack", sk->len, sk->truesize,
+		  skb_headroom(sk), skb_tailroom(sk));
+
+	/* walk the buffer with a byte pointer (the old unsigned-int casts
+	 * truncated pointers on 64-bit kernels)
+	 */
+	for (p = sk->head; p <= sk->data + 30; p++) {
+		if (((uintptr_t)p % 16) == 0)
+			pr_notice("\n");
+		if (p == sk->head)
+			pr_notice("@h");
+		if (p == sk->data)
+			pr_notice("@d");
+		pr_notice("%02X-", *p);
+	}
+	pr_notice("\n");
+}
+#endif
+#ifdef CONFIG_RA_HW_NAT_PACKET_SAMPLING
+static inline void hwnat_set_packet_sampling(struct foe_entry *entry)
+{
+	entry->ipv4_hnapt.bfib1.ps = 1;
+}
+#else
+static inline void hwnat_set_packet_sampling(struct foe_entry *entry)
+{
+}
+#endif
+
+static inline void hwnat_set_6rd_id(struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	reg_modify_bits(PPE_6RD_ID, ntohs(ppe_parse_result->iph.id), 0, 16);
+	reg_modify_bits(PPE1_6RD_ID, ntohs(ppe_parse_result->iph.id), 0, 16);
+	entry->ipv6_6rd.per_flow_6rd_id = 1;
+}
+
+
+uint16_t IS_IF_PCIE_WLAN(struct sk_buff *skb)
+{
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb))
+		return IS_IF_PCIE_WLAN_HEAD(skb);
+	else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb))
+		return IS_IF_PCIE_WLAN_TAIL(skb);
+	else if (IS_MAGIC_TAG_PROTECT_VALID_CB(skb))
+		return IS_IF_PCIE_WLAN_CB(skb);
+	else
+		return 0;
+}
+
+uint16_t is_if_pcie_wlan_rx(struct sk_buff *skb)
+{
+	return IS_IF_PCIE_WLAN_HEAD(skb);
+}
+
+uint16_t is_magic_tag_protect_valid(struct sk_buff *skb)
+{
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb))
+		return IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb);
+	else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb))
+		return IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb);
+	else if (IS_MAGIC_TAG_PROTECT_VALID_CB(skb))
+		return IS_MAGIC_TAG_PROTECT_VALID_CB(skb);
+	else
+		return 0;
+}
+
+unsigned char *FOE_INFO_START_ADDR(struct sk_buff *skb)
+{
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb))
+		return FOE_INFO_START_ADDR_HEAD(skb);
+	else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb))
+		return FOE_INFO_START_ADDR_TAIL(skb);
+	else if (IS_MAGIC_TAG_PROTECT_VALID_CB(skb))
+		return FOE_INFO_START_ADDR_CB(skb);
+
+	pr_notice("!!!FOE_INFO_START_ADDR Error!!!!\n");
+	return FOE_INFO_START_ADDR_HEAD(skb);
+}
+
+void FOE_INFO_DUMP(struct sk_buff *skb)
+{
+	pr_notice("FOE_INFO_START_ADDR(skb) =%p\n", FOE_INFO_START_ADDR(skb));
+	pr_notice("FOE_TAG_PROTECT(skb) =%x\n", FOE_TAG_PROTECT(skb));
+	pr_notice("FOE_ENTRY_NUM(skb) =%x\n", FOE_ENTRY_NUM(skb));
+	pr_notice("FOE_ALG(skb) =%x\n", FOE_ALG(skb));
+	pr_notice("FOE_AI(skb) =%x\n", FOE_AI(skb));
+	pr_notice("FOE_SP(skb) =%x\n", FOE_SP(skb));
+	pr_notice("FOE_IF_IDX(skb) =%x\n", FOE_IF_IDX(skb));
+	pr_notice("FOE_MAGIC_TAG(skb) =%x\n", FOE_MAGIC_TAG(skb));
+	if (fe_feature & WARP_WHNAT) {
+		pr_notice("FOE_WDMA_ID(skb) =%x\n", FOE_WDMA_ID(skb));
+		pr_notice("FOE_RX_ID(skb) =%x\n", FOE_RX_ID(skb));
+		pr_notice("FOE_WC_ID(skb) =%x\n", FOE_WC_ID(skb));
+		pr_notice("FOE_FOE_BSS_IDIF(skb) =%x\n", FOE_BSS_ID(skb));
+	}
+	pr_notice("FOE_MINFO(skb) =%x\n", FOE_MINFO(skb));
+	pr_notice("FOE_MINFO_NTYPE(skb) =%x\n", FOE_MINFO_NTYPE(skb));
+	pr_notice("FOE_MINFO_CHID(skb) =%x\n", FOE_MINFO_CHID(skb));
+}
+
+void FOE_INFO_DUMP_TAIL(struct sk_buff *skb)
+{
+	pr_notice("FOE_INFO_START_ADDR_TAIL(skb) =%p\n", FOE_INFO_START_ADDR_TAIL(skb));
+	pr_notice("FOE_TAG_PROTECT_TAIL(skb) =%x\n", FOE_TAG_PROTECT_TAIL(skb));
+	pr_notice("FOE_ENTRY_NUM_TAIL(skb) =%x\n", FOE_ENTRY_NUM_TAIL(skb));
+	pr_notice("FOE_ALG_TAIL(skb) =%x\n", FOE_ALG_TAIL(skb));
+	pr_notice("FOE_AI_TAIL(skb) =%x\n", FOE_AI_TAIL(skb));
+	pr_notice("FOE_SP_TAIL(skb) =%x\n", FOE_SP_TAIL(skb));
+	pr_notice("FOE_MAGIC_TAG_TAIL(skb) =%x\n", FOE_MAGIC_TAG_TAIL(skb));
+	if (fe_feature & WARP_WHNAT) {
+		pr_notice("FOE_WDMA_ID_TAIL(skb) =%x\n", FOE_WDMA_ID_TAIL(skb));
+		pr_notice("FOE_RX_ID_TAIL(skb) =%x\n", FOE_RX_ID_TAIL(skb));
+		pr_notice("FOE_WC_ID_TAIL(skb) =%x\n", FOE_WC_ID_TAIL(skb));
+		pr_notice("FOE_FOE_BSS_IDIF_TAIL(skb) =%x\n", FOE_BSS_ID_TAIL(skb));
+	}
+}
+
+#if 0
+extern int32_t ppe_parse_layer_info(struct sk_buff *skb, struct pkt_parse_result *ppe_parse_result);
+
+u32 syn_seq = 0;
+u32 ppe_tx_wifi_cnt = 0;
+int32_t ppe_get_tcp_seq(struct sk_buff *skb, const char *func) {
+
+	struct iphdr *iph = NULL;
+	struct tcphdr *th = NULL;
+
+	iph = (struct iphdr *)skb_network_header(skb);
+
+	if (iph->protocol == IPPROTO_TCP) {
+
+		th = (struct tcphdr *)skb_transport_header(skb);
+		if (th->syn == 1)
+			syn_seq = ntohl(th->seq);
+
+		pr_notice("%s %s, source:%u, dest:%u, syn_seq:%u, seq:%u, relative_seq:%u, ppe_tx_wifi_cnt:%u\n", func, __func__,
+			ntohs(th->source), ntohs(th->dest), syn_seq, ntohl(th->seq), ntohl(th->seq) - syn_seq, ppe_tx_wifi_cnt++);
+
+		return ntohl(th->seq);
+	}
+	return -1;
+}
+#endif
+
+
+int hwnat_info_region;
+uint16_t tx_decide_which_region(struct sk_buff *skb)
+{
+	u32 alg_tmp, sp_tmp, entry_tmp, ai_tmp;
+
+	if (IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb) && IS_SPACE_AVAILABLE_HEAD(skb)) {
+		hwnat_info_region = USE_HEAD_ROOM;
+
+		return USE_HEAD_ROOM;	/* use headroom */
+	} else if (IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb) && IS_SPACE_AVAILABLE_TAIL(skb)) {
+		FOE_INFO_START_ADDR(skb);
+		alg_tmp = FOE_ALG_TAIL(skb);
+		sp_tmp = FOE_SP_TAIL(skb);
+		entry_tmp = FOE_ENTRY_NUM_TAIL(skb);
+		ai_tmp = FOE_AI_TAIL(skb);
+		FOE_SP(skb) = sp_tmp & 0xf;
+		FOE_ENTRY_NUM(skb) = entry_tmp & 0x7fff;
+		FOE_AI(skb) = ai_tmp & 0x1f;
+		FOE_ALG(skb) = alg_tmp & 0x1;
+		FOE_TAG_PROTECT(skb) = FOE_TAG_PROTECT_TAIL(skb);
+		FOE_MAGIC_TAG(skb) = FOE_MAGIC_TAG_TAIL(skb);
+		if (fe_feature & WARP_WHNAT) {
+			FOE_WDMA_ID(skb) = FOE_WDMA_ID_TAIL(skb);
+			FOE_RX_ID(skb) = FOE_RX_ID_TAIL(skb);
+			FOE_WC_ID(skb) = FOE_WC_ID_TAIL(skb);
+			FOE_BSS_ID(skb) = FOE_BSS_ID_TAIL(skb);
+		}
+		FOE_MINFO(skb) = FOE_MINFO_TAIL(skb);
+		FOE_MINFO_NTYPE(skb) = FOE_MINFO_NTYPE_TAIL(skb);
+		FOE_MINFO_CHID(skb) = FOE_MINFO_CHID_TAIL(skb);
+		hwnat_info_region = USE_TAIL_ROOM;
+		return USE_TAIL_ROOM;	/* use tailroom */
+	}
+	hwnat_info_region = ALL_INFO_ERROR;
+	return ALL_INFO_ERROR;
+}
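+
+/*
+ * Typical caller pattern (illustrative): decide which region holds the FOE
+ * info before using any FOE_* accessor on the TX path, and fall back to the
+ * software path when neither region is valid:
+ *
+ *	if (tx_decide_which_region(skb) == ALL_INFO_ERROR)
+ *		return 1;
+ */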
+
+uint16_t remove_vlan_tag(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct vlan_ethhdr *veth;
+	u16 vir_if_idx;
+
+	if (skb_vlan_tag_present(skb)) { /*hw vlan rx enable*/
+		vir_if_idx = skb_vlan_tag_get(skb) & 0x3fff;
+		skb->vlan_proto = 0;
+		skb->vlan_tci = 0;
+		return vir_if_idx;
+	}
+
+	veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+	/* something wrong */
+	if ((veth->h_vlan_proto != htons(ETH_P_8021Q))) {
+		/* if (pr_debug_ratelimited()) */
+		if (debug_level >= 7)
+			pr_notice("HNAT: Reentry packet is untagged frame?\n");
+		return 65535;
+	}
+	/*we just want to get vid*/
+	vir_if_idx = ntohs(veth->h_vlan_TCI) & 0x3fff;
+
+	if (skb_cloned(skb) || skb_shared(skb)) {
+		struct sk_buff *new_skb;
+
+		new_skb = skb_copy(skb, GFP_ATOMIC);
+		kfree_skb(skb);
+		if (!new_skb)
+			return 65535;
+		skb = new_skb;
+		/* new_skb is now the live buffer; do not free it here */
+	}
+
+	/* remove VLAN tag */
+	skb->data = skb_mac_header(skb);
+	skb->mac_header = skb->mac_header + VLAN_HLEN;
+	memmove(skb_mac_header(skb), skb->data, ETH_ALEN * 2);
+
+	skb_pull(skb, VLAN_HLEN);
+	skb->data += ETH_HLEN;	/* pointer to layer3 header */
+	eth = (struct ethhdr *)skb_mac_header(skb);
+
+	skb->protocol = eth->h_proto;
+	return vir_if_idx;
+}
+#ifdef CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT
+extern void __iomem *medmcu_hnat_info_host_base_virt;
+void hnat_info_init(struct device *dev)
+{
+	//dma_addr_t info_phy_base;
+	//u32 info_tbl_size;
+
+	//info_tbl_size = MED_INFO_SIZE * sizeof(struct MED_HNAT_INFO_HOST);
+	//med_info_base = dma_alloc_coherent(dev, info_tbl_size, &info_phy_base, GFP_KERNEL);
+	med_info_base = medmcu_hnat_info_host_base_virt;
+
+	pr_notice("sizeof(struct MED_HNAT_INFO_HOST) = %ld\n", sizeof(struct MED_HNAT_INFO_HOST));
+	//pr_notice("info_tbl_size = %d\n", info_tbl_size);
+	//pr_notice("MED_HNAT_INFO_HOST = %p, fdma_phy_base =%p\n", MED_HNAT_INFO_HOST, fdma_phy_base);
+
+	//reg_write(MEDHW_SSR1_DST_RB0_BASE, info_phy_base);
+	//reg_write(MEDHW_SSR1_DST_RB0_SIZE, MED_INFO_SIZE);
+
+}
+#endif
+static int foe_alloc_tbl(u32 num_of_entry, struct device *dev)
+{
+	u32 foe_tbl_size;
+	u32 mib_tbl_size;
+	dma_addr_t ppe_phy_foebase_tmp;
+
+	foe_tbl_size = num_of_entry * sizeof(struct foe_entry);
+
+	pr_notice("debug : ppe0 sizeof(struct foe_entry) = %zu, foe_tbl_size = %u\n",
+		  sizeof(struct foe_entry), foe_tbl_size);
+	ppe_phy_foebase_tmp = reg_read(PPE_FOE_BASE);
+#ifdef CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT
+	hnat_info_init(dev);
+#endif
+	if (ppe_phy_foebase_tmp) {
+		ppe_phy_foe_base = ppe_phy_foebase_tmp;
+		ppe_foe_base = (struct foe_entry *)ppe_virt_foe_base_tmp;
+		pr_notice("***ppe0_foe_base = %p\n", ppe_foe_base);
+		pr_notice("***PpeVirtFoeBase_tmp = %p\n", ppe_virt_foe_base_tmp);
+		if (!ppe_foe_base) {
+			pr_notice("PPE_FOE_BASE=%x\n", reg_read(PPE_FOE_BASE));
+			pr_notice("ppe_foe_base ioremap fail!!!!\n");
+			return 0;
+		}
+	} else {
+		if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+			ppe_foe_base =
+			    dma_alloc_coherent(dev, foe_tbl_size, &ppe_phy_foe_base, GFP_KERNEL);
+		} else {
+			ppe_foe_base = dma_alloc_coherent(NULL, foe_tbl_size, &ppe_phy_foe_base, GFP_KERNEL);
+		}
+
+		ppe_virt_foe_base_tmp = ppe_foe_base;
+		pr_notice("init PpeVirtFoeBase_tmp = %p\n", ppe_virt_foe_base_tmp);
+		pr_notice("init ppe_foe_base = %p\n", ppe_foe_base);
+
+		if (!ppe_foe_base) {
+			pr_notice("first ppe_phy_foe_base fail\n");
+			return 0;
+		}
+	}
+
+	if (!ppe_foe_base) {
+		pr_notice("ppe_foe_base== NULL\n");
+		return 0;
+	}
+
+	reg_write(PPE_FOE_BASE, ppe_phy_foe_base);
+	memset(ppe_foe_base, 0, foe_tbl_size);
+
+	if (fe_feature & PPE_MIB) {
+		mib_tbl_size = num_of_entry * sizeof(struct mib_entry);
+		pr_notice("foe_tbl_size = %u\n", foe_tbl_size);
+		ppe_mib_base = dma_alloc_coherent(dev, mib_tbl_size, &ppe_phy_mib_base, GFP_KERNEL);
+		if (!ppe_mib_base) {
+			pr_notice("PPE MIB allocate memory fail");
+			return 0;
+		}
+		pr_notice("ppe_mib_base = %p\n",  ppe_mib_base);
+		pr_notice("num_of_entry = %u\n",  num_of_entry);
+		pr_notice("mib_tbl_size = %d\n",  mib_tbl_size);
+		reg_write(MIB_TB_BASE, ppe_phy_mib_base);
+		memset(ppe_mib_base, 0, mib_tbl_size);
+	}
+
+	return 1;
+}
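+
+#if (0)
+/*
+ * Teardown sketch mirroring foe_alloc_tbl() (hypothetical helper, kept
+ * disabled like the other reference snippets in this file): coherent
+ * buffers must be released with the same size and DMA handle they were
+ * allocated with.
+ */
+static void foe_free_tbl(u32 num_of_entry, struct device *dev)
+{
+	dma_free_coherent(dev, num_of_entry * sizeof(struct foe_entry),
+			  ppe_foe_base, ppe_phy_foe_base);
+	if (fe_feature & PPE_MIB)
+		dma_free_coherent(dev, num_of_entry * sizeof(struct mib_entry),
+				  ppe_mib_base, ppe_phy_mib_base);
+}
+#endif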
+
+static int foe_alloc_tbl_ppe1(u32 num_of_entry, struct device *dev)
+{
+	u32 foe_tbl_size;
+
+	dma_addr_t ppe1_phy_foebase_tmp;
+	u32 mib_tbl_size;
+
+	foe_tbl_size = num_of_entry * sizeof(struct foe_entry);
+
+	pr_notice("debug : ppe1 sizeof(struct foe_entry) = %zu, foe_tbl_size = %u\n",
+		  sizeof(struct foe_entry), foe_tbl_size);
+
+	ppe1_phy_foebase_tmp = reg_read(PPE1_FOE_BASE);
+
+	if (ppe1_phy_foebase_tmp) {
+		ppe1_phy_foe_base = ppe1_phy_foebase_tmp;
+		ppe1_foe_base = (struct foe_entry *)ppe1_virt_foe_base_tmp;
+		pr_notice("***ppe1_foe_base = %p\n", ppe1_foe_base);
+		pr_notice("***Ppe1VirtFoeBase_tmp = %p\n", ppe1_virt_foe_base_tmp);
+		if (!ppe1_foe_base) {
+			pr_notice("PPE1_FOE_BASE=%x\n", reg_read(PPE1_FOE_BASE));
+			pr_notice("ppe1_foe_base ioremap fail!!!!\n");
+			return 0;
+		}
+	} else {
+		if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+			ppe1_foe_base =
+			    dma_alloc_coherent(dev, foe_tbl_size, &ppe1_phy_foe_base, GFP_KERNEL);
+		} else {
+			ppe1_foe_base = dma_alloc_coherent(NULL, foe_tbl_size, &ppe1_phy_foe_base, GFP_KERNEL);
+		}
+
+		ppe1_virt_foe_base_tmp = ppe1_foe_base;
+		pr_notice("init Ppe1VirtFoeBase_tmp = %p\n", ppe1_virt_foe_base_tmp);
+		pr_notice("init ppe1_foe_base = %p\n", ppe1_foe_base);
+
+		if (!ppe1_foe_base) {
+			pr_notice("first ppe1_phy_foe_base fail\n");
+			return 0;
+		}
+	}
+
+	if (!ppe1_foe_base) {
+		pr_notice("ppe1_foe_base== NULL\n");
+		return 0;
+	}
+
+	reg_write(PPE1_FOE_BASE, ppe1_phy_foe_base);
+	memset(ppe1_foe_base, 0, foe_tbl_size);
+
+
+	if (fe_feature & PPE_MIB) {
+		mib_tbl_size = num_of_entry * sizeof(struct mib_entry);
+		pr_notice("foe_tbl_size = %u\n", foe_tbl_size);
+		ppe1_mib_base = dma_alloc_coherent(dev, mib_tbl_size, &ppe1_phy_mib_base, GFP_KERNEL);
+		if (!ppe1_mib_base) {
+			pr_notice("PPE MIB allocate memory fail");
+			return 0;
+		}
+		pr_notice("ppe1_mib_base = %p\n",  ppe1_mib_base);
+		pr_notice("num_of_entry = %u\n",  num_of_entry);
+		pr_notice("mib_tbl_size = %d\n",  mib_tbl_size);
+		reg_write(MIB_TB_BASE_PPE1, ppe1_phy_mib_base);
+		memset(ppe1_mib_base, 0, mib_tbl_size);
+	}
+
+	return 1;
+}
+
+static const char *show_cpu_reason(struct sk_buff *skb)
+{
+	static char buf[32];
+
+	switch (FOE_AI(skb)) {
+	case TTL_0:
+		return "IPv4(IPv6) TTL(hop limit) = 0\n";
+	case HAS_OPTION_HEADER:
+		return "IPv4(IPv6) has option(extension) header\n";
+	case NO_FLOW_IS_ASSIGNED:
+		return "No flow is assigned\n";
+	case IPV4_WITH_FRAGMENT:
+		return "IPv4 HNAT doesn't support IPv4 /w fragment\n";
+	case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+		return "IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n";
+	case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+		return "IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n";
+	case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+		return "IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n";
+	case TCP_FIN_SYN_RST:
+		return "Ingress packet is TCP fin/syn/rst\n";
+	case UN_HIT:
+		return "FOE Un-hit\n";
+	case HIT_UNBIND:
+		return "FOE Hit unbind\n";
+	case HIT_UNBIND_RATE_REACH:
+		return "FOE Hit unbind & rate reach\n";
+	case HIT_BIND_TCP_FIN:
+		return "Hit bind PPE TCP FIN entry\n";
+	case HIT_BIND_TTL_1:
+		return "Hit bind PPE entry with TTL(hop limit) = 1; decrement TTL(hop limit)\n";
+	case HIT_BIND_WITH_VLAN_VIOLATION:
+		return "Hit bind and VLAN replacement violation\n";
+	case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+		return "Hit bind and keep alive with unicast old-header packet\n";
+	case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+		return "Hit bind and keep alive with multicast new-header packet\n";
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		return "Hit bind and keep alive with duplicate old-header packet\n";
+	case HIT_BIND_FORCE_TO_CPU:
+		return "FOE Hit bind & force to CPU\n";
+	case HIT_BIND_EXCEED_MTU:
+		return "Hit bind and exceed MTU\n";
+	case HIT_BIND_MULTICAST_TO_CPU:
+		return "Hit bind multicast packet to CPU\n";
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		return "Hit bind multicast packet to GMAC & CPU\n";
+	case HIT_PRE_BIND:
+		return "Pre bind\n";
+	}
+
+	snprintf(buf, sizeof(buf), "CPU Reason Error - %X\n", FOE_AI(skb));
+	return buf;
+}
+
+#if (1)
+uint32_t foe_dump_pkt_tx(struct sk_buff *skb, struct foe_entry *entry)
+{
+	NAT_PRINT("\nTx===<FOE_Entry=%d, name=%s>=====\n", FOE_ENTRY_NUM(skb), skb->dev->name);
+	foe_dump_pkt(skb, entry);
+
+	return 1;
+#if(0)
+	struct foe_entry *entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+	int i;
+
+	NAT_PRINT("\nTx===<FOE_Entry=%d>=====\n", FOE_ENTRY_NUM(skb));
+	pr_notice("Tx handler skb_headroom size = %u, skb->head = %p, skb->data = %p\n",
+		skb_headroom(skb), skb->head, skb->data);
+	for (i = 0; i < skb_headroom(skb); i++) {
+		pr_notice("tx_skb->head[%d]=%x\n", i, *(unsigned char *)(skb->head + i));
+		/* pr_notice("%02X-",*((unsigned char*)i)); */
+	}
+
+	NAT_PRINT("==================================\n");
+	return 1;
+#endif
+}
+#endif
+
+uint32_t foe_dump_pkt(struct sk_buff *skb, struct foe_entry *entry)
+{
+	//struct foe_entry *entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+
+	//NAT_PRINT("\nRx===<FOE_Entry=%d>=====\n", FOE_ENTRY_NUM(skb));
+	NAT_PRINT("RcvIF=%s\n", skb->dev->name);
+	NAT_PRINT("FOE_Entry=%d\n", FOE_ENTRY_NUM(skb));
+	NAT_PRINT("CPU Reason=%s", show_cpu_reason(skb));
+	NAT_PRINT("ALG=%d\n", FOE_ALG(skb));
+	NAT_PRINT("SP=%d\n", FOE_SP(skb));
+
+	/* some special alert occurred, so entry_num is useless (just skip it) */
+	if (FOE_ENTRY_NUM(skb) == 0x3fff)
+		return 1;
+
+	/* PPE: IPv4 packet=IPV4_HNAT IPv6 packet=IPV6_ROUTE */
+	if (IS_IPV4_GRP(entry)) {
+		NAT_PRINT("Information Block 1=%x\n", entry->ipv4_hnapt.info_blk1);
+		NAT_PRINT("SIP=%s\n", ip_to_str(entry->ipv4_hnapt.sip));
+		NAT_PRINT("DIP=%s\n", ip_to_str(entry->ipv4_hnapt.dip));
+		NAT_PRINT("SPORT=%d\n", entry->ipv4_hnapt.sport);
+		NAT_PRINT("DPORT=%d\n", entry->ipv4_hnapt.dport);
+		NAT_PRINT("Information Block 2=%x\n", entry->ipv4_hnapt.info_blk2);
+		NAT_PRINT("State = %s, proto = %s\n",
+			  entry->bfib1.state ==
+			  0 ? "Invalid" : entry->bfib1.state ==
+			  1 ? "Unbind" : entry->bfib1.state ==
+			  2 ? "BIND" : entry->bfib1.state ==
+			  3 ? "FIN" : "Unknown", entry->ipv4_hnapt.bfib1.udp ==
+			  0 ? "TCP" : entry->ipv4_hnapt.bfib1.udp ==
+			  1 ? "UDP" : "Unknown");
+	}
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_GRP(entry)) {
+			NAT_PRINT("Information Block 1=%x\n", entry->ipv6_5t_route.info_blk1);
+			NAT_PRINT("IPv6_SIP=%08X:%08X:%08X:%08X\n",
+				  entry->ipv6_5t_route.ipv6_sip0,
+				  entry->ipv6_5t_route.ipv6_sip1,
+				  entry->ipv6_5t_route.ipv6_sip2, entry->ipv6_5t_route.ipv6_sip3);
+			NAT_PRINT("IPv6_DIP=%08X:%08X:%08X:%08X\n",
+				  entry->ipv6_5t_route.ipv6_dip0,
+				  entry->ipv6_5t_route.ipv6_dip1,
+				  entry->ipv6_5t_route.ipv6_dip2, entry->ipv6_5t_route.ipv6_dip3);
+			if (IS_IPV6_FLAB_EBL()) {
+				NAT_PRINT("Flow Label=%08X\n", (entry->ipv6_5t_route.sport << 16) |
+					  (entry->ipv6_5t_route.dport));
+			} else {
+				NAT_PRINT("SPORT=%d\n", entry->ipv6_5t_route.sport);
+				NAT_PRINT("DPORT=%d\n", entry->ipv6_5t_route.dport);
+			}
+			NAT_PRINT("Information Block 2=%x\n", entry->ipv6_5t_route.info_blk2);
+			NAT_PRINT("State = %s, proto = %s\n",
+				  entry->bfib1.state ==
+				  0 ? "Invalid" : entry->bfib1.state ==
+				  1 ? "Unbind" : entry->bfib1.state ==
+				  2 ? "BIND" : entry->bfib1.state ==
+				  3 ? "FIN" : "Unknown", entry->ipv6_5t_route.bfib1.udp ==
+				  0 ? "TCP" : entry->ipv6_5t_route.bfib1.udp ==
+				  1 ? "UDP" : "Unknown");
+		}
+	}
+	if ((!IS_IPV4_GRP(entry)) && (!(IS_IPV6_GRP(entry))))
+		NAT_PRINT("unknown Pkt_type=%d\n", entry->bfib1.pkt_type);
+
+	NAT_PRINT("==================================\n");
+	return 1;
+}
+
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb)
+{
+	switch (FOE_AI(skb)) {
+	case TTL_0:
+		dbg_cpu_reason_cnt[0]++;
+		return 0;
+	case HAS_OPTION_HEADER:
+		dbg_cpu_reason_cnt[1]++;
+		return 0;
+	case NO_FLOW_IS_ASSIGNED:
+		dbg_cpu_reason_cnt[2]++;
+		return 0;
+	case IPV4_WITH_FRAGMENT:
+		dbg_cpu_reason_cnt[3]++;
+		return 0;
+	case IPV4_HNAPT_DSLITE_WITH_FRAGMENT:
+		dbg_cpu_reason_cnt[4]++;
+		return 0;
+	case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP:
+		dbg_cpu_reason_cnt[5]++;
+		return 0;
+	case IPV6_5T_6RD_WITHOUT_TCP_UDP:
+		dbg_cpu_reason_cnt[6]++;
+		return 0;
+	case TCP_FIN_SYN_RST:
+		dbg_cpu_reason_cnt[7]++;
+		return 0;
+	case UN_HIT:
+		dbg_cpu_reason_cnt[8]++;
+		return 0;
+	case HIT_UNBIND:
+		dbg_cpu_reason_cnt[9]++;
+		return 0;
+	case HIT_UNBIND_RATE_REACH:
+		dbg_cpu_reason_cnt[10]++;
+		return 0;
+	case HIT_BIND_TCP_FIN:
+		dbg_cpu_reason_cnt[11]++;
+		return 0;
+	case HIT_BIND_TTL_1:
+		dbg_cpu_reason_cnt[12]++;
+		return 0;
+	case HIT_BIND_WITH_VLAN_VIOLATION:
+		dbg_cpu_reason_cnt[13]++;
+		return 0;
+	case HIT_BIND_KEEPALIVE_UC_OLD_HDR:
+		dbg_cpu_reason_cnt[14]++;
+		return 0;
+	case HIT_BIND_KEEPALIVE_MC_NEW_HDR:
+		dbg_cpu_reason_cnt[15]++;
+		return 0;
+	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+		dbg_cpu_reason_cnt[16]++;
+		return 0;
+	case HIT_BIND_FORCE_TO_CPU:
+		dbg_cpu_reason_cnt[17]++;
+		return 0;
+	case HIT_BIND_EXCEED_MTU:
+		dbg_cpu_reason_cnt[18]++;
+		return 0;
+	case HIT_BIND_MULTICAST_TO_CPU:
+		dbg_cpu_reason_cnt[19]++;
+		return 0;
+	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
+		dbg_cpu_reason_cnt[20]++;
+		return 0;
+	case HIT_PRE_BIND:
+		dbg_cpu_reason_cnt[21]++;
+		return 0;
+	}
+
+	return 0;
+}
+
+int get_bridge_info(void)
+{
+	struct net_device *br0_dev;
+	struct in_device *br0_in_dev;
+
+	if (fe_feature & HNAT_OPENWRT)
+		br0_dev = dev_get_by_name(&init_net, "br-lan");
+	else
+		br0_dev = dev_get_by_name(&init_net, "br0");
+
+	if (!br0_dev) {
+		pr_notice("br0_dev = NULL\n");
+		return 1;
+	}
+	br0_in_dev = in_dev_get(br0_dev);
+	if (!br0_in_dev) {
+		pr_notice("br0_in_dev = NULL\n");
+		dev_put(br0_dev);	/* drop the dev_get_by_name() reference */
+		return 1;
+	}
+	if (!br0_in_dev->ifa_list) {
+		pr_notice("br0 has no IPv4 address\n");
+		in_dev_put(br0_in_dev);
+		dev_put(br0_dev);
+		return 1;
+	}
+	br_netmask = ntohl(br0_in_dev->ifa_list->ifa_mask);
+	br0_ip = ntohl(br0_in_dev->ifa_list->ifa_address);
+	in_dev_put(br0_in_dev);
+	dev_put(br0_dev);
+
+	pr_notice("br0_ip = %x\n", br0_ip);
+	pr_notice("br_netmask = %x\n", br_netmask);
+	get_brlan = 1;
+
+	return 0;
+}
+
+int bridge_lan_subnet(struct sk_buff *skb)
+{
+	struct iphdr *iph = NULL;
+	u32 daddr = 0;
+	u32 saddr = 0;
+	u32 eth_type;
+	u32 ppp_tag = 0;
+	struct vlan_hdr *vh = NULL;
+	struct ethhdr *eth = NULL;
+	struct pppoe_hdr *peh = NULL;
+	u8 vlan1_gap = 0;
+	u8 vlan2_gap = 0;
+	u8 pppoe_gap = 0;
+	int ret;
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+	struct vlan_hdr pseudo_vhdr;
+#endif
+
+	eth = (struct ethhdr *)skb->data;
+	if (is_multicast_ether_addr(&eth->h_dest[0]))
+		return 0;
+	eth_type = eth->h_proto;
+	if ((eth_type == htons(ETH_P_8021Q)) ||
+	    (((eth_type) & 0x00FF) == htons(ETH_P_8021Q)) || skb_vlan_tag_present(skb)) {
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+		pseudo_vhdr.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+		pseudo_vhdr.h_vlan_encapsulated_proto = eth->h_proto;
+		vh = (struct vlan_hdr *)&pseudo_vhdr;
+		vlan1_gap = VLAN_HLEN;
+#else
+		vlan1_gap = VLAN_HLEN;
+		vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+#endif
+
+		/* VLAN + PPPoE */
+		if (ntohs(vh->h_vlan_encapsulated_proto) == ETH_P_PPP_SES) {
+			pppoe_gap = 8;
+			eth_type = vh->h_vlan_encapsulated_proto;
+			/* Double VLAN = VLAN + VLAN */
+		} else if ((vh->h_vlan_encapsulated_proto == htons(ETH_P_8021Q)) ||
+			   ((vh->h_vlan_encapsulated_proto) & 0x00FF) == htons(ETH_P_8021Q)) {
+			vlan2_gap = VLAN_HLEN;
+			vh = (struct vlan_hdr *)(skb->data + ETH_HLEN + VLAN_HLEN);
+			/* VLAN + VLAN + PPPoE */
+			if (ntohs(vh->h_vlan_encapsulated_proto) == ETH_P_PPP_SES) {
+				pppoe_gap = 8;
+				eth_type = vh->h_vlan_encapsulated_proto;
+			} else {
+				eth_type = vh->h_vlan_encapsulated_proto;
+			}
+		}
+	} else if (ntohs(eth_type) == ETH_P_PPP_SES) {
+		/* PPPoE + IP */
+		pppoe_gap = 8;
+		peh = (struct pppoe_hdr *)(skb->data + ETH_HLEN + vlan1_gap);
+		ppp_tag = peh->tag[0].tag_type;
+	}
+
+	if (get_brlan == 0) {
+		ret = get_bridge_info(); /*return 1 br0 get fail*/
+		if (ret == 1)
+			return 0;
+	}
+	/* set layer4 start addr */
+	if ((eth_type == htons(ETH_P_IP)) || (eth_type == htons(ETH_P_PPP_SES) && ppp_tag == htons(PPP_IP))) {
+		iph = (struct iphdr *)(skb->data + ETH_HLEN + vlan1_gap + vlan2_gap + pppoe_gap);
+		daddr = ntohl(iph->daddr);
+		saddr = ntohl(iph->saddr);
+	}
+
+	if (((br0_ip & br_netmask) == (daddr & br_netmask)) &&
+	    ((daddr & br_netmask) == (saddr & br_netmask)))
+		return 1;
+	return 0;
+}
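+
+/*
+ * Worked example (hypothetical addresses): with br0_ip = 192.168.1.1 and
+ * br_netmask = 255.255.255.0, a frame 192.168.1.10 -> 192.168.1.20 has both
+ * endpoints inside br0's subnet, so bridge_lan_subnet() returns 1 and the
+ * flow stays on the bridge path instead of being offloaded.
+ */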
+
+int bridge_short_cut_rx(struct sk_buff *skb)
+{
+	struct iphdr *iph = NULL;
+	u32 daddr;
+	int ret;
+
+	if (get_brlan == 0) {
+		ret = get_bridge_info(); /*return 1 get br0 fail*/
+		if (ret == 1)
+			return 0;
+	}
+
+	iph = (struct iphdr *)(skb->data);
+	daddr = ntohl(iph->daddr);
+	if ((br0_ip & br_netmask) == (daddr & br_netmask))
+		return 1;
+	else
+		return 0;
+}
+
+/* push different VID for WiFi pseudo interface or USB external NIC */
+uint32_t ppe_extif_rx_handler(struct sk_buff *skb)
+{
+	u16 vir_if_idx = 0;
+	int i = 0;
+	int dev_match = 0;
+	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+
+	if (debug_level >= 10)
+		pr_notice("%s, name = %s\n", __func__, skb->dev->name);
+
+	/* PPE can only handle IPv4/IPv6/PPP packets */
+	if (((skb->protocol != htons(ETH_P_8021Q)) &&
+	     (skb->protocol != htons(ETH_P_IP)) && (skb->protocol != htons(ETH_P_IPV6)) &&
+	     (skb->protocol != htons(ETH_P_PPP_SES)) && (skb->protocol != htons(ETH_P_PPP_DISC))) ||
+	    is_multicast_ether_addr(&eth->h_dest[0])) {
+		if (debug_level >= 10)
+			pr_notice("%s not support, skb->protocol = 0x%x, multicast:%d\n",
+				  __func__, skb->protocol, is_multicast_ether_addr(&eth->h_dest[0]));
+		return 1;
+	}
+
+	if (debug_level >= 10)
+		pr_notice("%s enter, name = %s, protocol = 0x%x, skb-headroom=%d\n", __func__,
+			skb->dev->name, skb->protocol, skb_headroom(skb));
+
+	skb_set_network_header(skb, 0);
+
+#ifdef CONFIG_SUPPORT_WLAN_OPTIMIZE
+		if (bridge_short_cut_rx(skb))
+			return 1;	/* Bridge ==> sw path (rps) */
+#endif
+
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == skb->dev) {
+			vir_if_idx = i;
+			dev_match = 1;
+			if (debug_level >= 7)
+				pr_notice("%s : Interface=%s, vir_if_idx=%x\n", __func__, skb->dev->name, vir_if_idx);
+			break;
+		}
+	}
+
+#ifdef CONFIG_RAETH_EDMA
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (!dst_port[i]) {
+			pr_err("[HS-ethernet/HWNAT/RX] %s : dst_port[%d] is NULL\n", __func__, i);
+			continue;
+		}
+
+		if ((strcmp(dst_port[i]->name, DEV_NAME) == 0 && strcmp(skb->dev->name, AQR_DEV_NAME) == 0) ||
+		    (strcmp(dst_port[i]->name, DEV2_NAME) == 0 && strcmp(skb->dev->name, AQR_DEV2_NAME) == 0)) {
+			vir_if_idx = i;
+			dev_match = 1;
+			if (debug_level >= 7)
+				pr_notice("[HS-ethernet/HWNAT/RX] %s : dev_match ok Interfacess=%s, vir_if_idx=%x\n", __func__, skb->dev->name, vir_if_idx);
+			break;
+		}
+	}
+#endif
+
+	if (dev_match == 0) {
+		if (debug_level >= 1)
+			pr_notice("%s UnKnown Interface, vir_if_idx=%x\n", __func__, vir_if_idx);
+		return 1;
+	}
+
+	/* push vlan tag to stand for actual incoming interface, */
+	/* so HNAT module can know the actual incoming interface from vlan id. */
+	skb_push(skb, ETH_HLEN);/* pointer to layer2 header before calling hard_start_xmit */
+
+#ifdef CONFIG_RAETH_EDMA
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_EDMA0)
+			skb->dev = dst_port[DP_EDMA0];
+		else if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_EDMA1)
+			skb->dev = dst_port[DP_EDMA1];
+		else
+			skb->dev = dst_port[DP_GMAC1];	/* we use GMAC1 to send the packet to PPE */
+#else
+		if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA0)
+			skb->dev = dst_port[DP_EDMA0];
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA1)
+			skb->dev = dst_port[DP_EDMA1];
+		else
+			skb->dev = dst_port[DP_GMAC1];	/* we use GMAC1 to send the packet to PPE */
+#endif
+
+#ifdef CONFIG_SUPPORT_WLAN_QOS
+		set_qid(skb);
+#endif
+	skb->vlan_proto = htons(ETH_P_8021Q);
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+	skb->vlan_tci |= VLAN_TAG_PRESENT;
+	skb->vlan_tci |= vir_if_idx;
+#else
+	skb = vlan_insert_tag(skb, skb->vlan_proto, vir_if_idx);
+	if (skb == NULL) {
+		if (debug_level >= 3)
+			pr_notice("%s, vlan_insert_tag() frees the skb\n", __func__);
+		return 0;
+	}
+#endif
+	if (IS_SPACE_AVAILABLE_HEAD(skb) && IS_SPACE_AVAILABLE_TAIL(skb)) {
+		/* redirect to PPE */
+#ifdef CONFIG_RAETH_EDMA
+		FOE_AI_TAIL(skb) = UN_HIT;
+		FOE_TAG_PROTECT_TAIL(skb) = TAG_PROTECT;
+
+		if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_EDMA0) {
+			FOE_MAGIC_TAG_TAIL(skb) = FOE_MAGIC_PPE0;
+		} else if (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_EDMA1) {
+			FOE_MAGIC_TAG_TAIL(skb) = FOE_MAGIC_PPE1;
+		} else {
+			FOE_MAGIC_TAG_TAIL(skb) = FOE_MAGIC_PPE;
+		}
+#else
+		FOE_AI(skb) = UN_HIT;
+		FOE_AI_TAIL(skb) = UN_HIT;
+		FOE_TAG_PROTECT(skb) = TAG_PROTECT;
+		FOE_TAG_PROTECT_TAIL(skb) = TAG_PROTECT;
+
+		if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA0) {
+			FOE_MAGIC_TAG(skb) = FOE_MAGIC_PPE0;
+			FOE_MAGIC_TAG_TAIL(skb) = FOE_MAGIC_PPE0;
+		} else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA1) {
+			FOE_MAGIC_TAG(skb) = FOE_MAGIC_PPE1;
+			FOE_MAGIC_TAG_TAIL(skb) = FOE_MAGIC_PPE1;
+		} else {
+			FOE_MAGIC_TAG(skb) = FOE_MAGIC_PPE;
+			FOE_MAGIC_TAG_TAIL(skb) = FOE_MAGIC_PPE;
+		}
+#endif
+	} else {
+		if (debug_level >= 3)
+			pr_notice("%s, can't fill FOE!\n",
+				__func__);
+		return 1;
+	}
+
+#ifdef CONFIG_SUPPORT_WLAN_QOS
+		/*if (debug_level >= 2)*/
+			/*pr_notice("skb->dev = %s\n", skb->dev);*/
+		if ((!skb->dev) || ((skb->dev != dst_port[DP_GMAC2]) &&
+		    (skb->dev != dst_port[DP_GMAC1])))
+			skb->dev = dst_port[DP_GMAC1];	/* we use GMAC1 to send the packet to PPE */
+#endif
+	if (debug_level >= 10)
+		pr_notice("%s, send to ppe via ETH tx\n", __func__);
+	dev_queue_xmit(skb);
+
+	return 0;
+}
+
+uint32_t ppe_extif_pingpong_handler(struct sk_buff *skb)
+{
+	struct ethhdr *eth = NULL;
+	u16 vir_if_idx = 0, idx;
+	struct net_device *dev;
+#ifdef CONFIG_RAETH_EDMA
+	struct net_device *aqr_dev = NULL;	/* stays NULL when no AQR interface matches */
+#endif
+
+	if (skb == NULL) {
+		if (debug_level >= 7)
+			pr_notice("%s skb == NULL\n", __func__);
+		return 1;
+	}
+
+	if (debug_level >= 10)
+		pr_notice("%s, FOE_AI(skb):0x%x, FOE_SP(skb):%d, name:%s\n",
+			__func__, FOE_AI(skb), FOE_SP(skb), skb->dev->name);
+
+	vir_if_idx = remove_vlan_tag(skb);
+
+	if ((skb == NULL) || (vir_if_idx == 65535)) {
+		pr_notice("%s, invalid skb or vir_if_idx (%u)\n", __func__, vir_if_idx);
+		return 1;
+	}
+#ifdef CONFIG_RAETH_EDMA
+	if (vir_if_idx < MAX_IF_NUM && dst_port[vir_if_idx] != NULL) {
+		if (strcmp(dst_port[vir_if_idx]->name, DEV_NAME) == 0) {
+			if (aqr_dev1 != NULL) {
+				aqr_dev = aqr_dev1;
+			} else {
+				aqr_dev1 = ra_dev_get_by_name(AQR_DEV_NAME);
+				aqr_dev = aqr_dev1;
+			}
+		} else if (strcmp(dst_port[vir_if_idx]->name, DEV2_NAME) == 0) {
+			if (aqr_dev2 != NULL) {
+				aqr_dev = aqr_dev2;
+			} else {
+				aqr_dev2 = ra_dev_get_by_name(AQR_DEV2_NAME);
+				aqr_dev = aqr_dev2;
+			}
+		}
+	}
+#endif
+	/* recover to right incoming interface */
+	if (vir_if_idx < MAX_IF_NUM && dst_port[vir_if_idx]) {
+#ifdef CONFIG_RAETH_EDMA
+		if (aqr_dev != NULL) {
+			skb->dev = aqr_dev;
+			if (debug_level >= 7) {
+				pr_notice("[HS-ethernet/HWNAT/RX-pingpong] set the interface id back= %s (AQR)\n", aqr_dev->name);
+			}
+		} else {
+			skb->dev = dst_port[vir_if_idx];
+			if (debug_level >= 7) {
+				pr_notice("[HS-ethernet/HWNAT/RX-pingpong] set the interface id back= %s\n", skb->dev->name);
+			}
+		}
+#else
+		if (dst_port[vir_if_idx] != NULL)
+			skb->dev = dst_port[vir_if_idx];
+#endif
+
+	} else {
+		if (debug_level >= 1)
+			pr_notice("%s : HNAT: unknown interface (vir_if_idx=%d)\n", __func__, vir_if_idx);
+		return 1;
+	}
+
+	eth = (struct ethhdr *)skb_mac_header(skb);
+
+	if (eth->h_dest[0] & 1) {
+		if (ether_addr_equal(eth->h_dest, skb->dev->broadcast) == 0)
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	} else {
+		skb->pkt_type = PACKET_OTHERHOST;
+
+#ifdef CONFIG_RAETH_EDMA
+		for (idx = 1; idx < MAX_IF_NUM; idx++) {
+			dev = dst_port[idx];
+			if (!dev)
+				continue;
+			if (strcmp(dev->name, DEV_NAME) == 0) {
+				if (aqr_dev1 != NULL) {
+					aqr_dev = aqr_dev1;
+				} else {
+					aqr_dev1 = ra_dev_get_by_name(AQR_DEV_NAME);
+					aqr_dev = aqr_dev1;
+				}
+
+				if (aqr_dev && ether_addr_equal(eth->h_dest, aqr_dev->dev_addr) == 0) {
+					//pr_notice("aqr0_dev_addr=%x:%x:%x:%x:%x:%x\n",aqr_dev->dev_addr[0],aqr_dev->dev_addr[1],aqr_dev->dev_addr[2],aqr_dev->dev_addr[3],aqr_dev->dev_addr[4],aqr_dev->dev_addr[5]);
+					skb->pkt_type = PACKET_HOST;
+					break;
+				}
+
+			} else if (strcmp(dev->name, DEV2_NAME) == 0) {
+				if (aqr_dev2 != NULL) {
+					aqr_dev = aqr_dev2;
+				} else {
+					aqr_dev2 = ra_dev_get_by_name(AQR_DEV2_NAME);
+					aqr_dev = aqr_dev2;
+				}
+
+				if (aqr_dev && ether_addr_equal(eth->h_dest, aqr_dev->dev_addr) == 0) {
+					//pr_notice("aqr1_dev_addr=%x:%x:%x:%x:%x:%x\n",aqr_dev->dev_addr[0],aqr_dev->dev_addr[1],aqr_dev->dev_addr[2],aqr_dev->dev_addr[3],aqr_dev->dev_addr[4],aqr_dev->dev_addr[5]);
+					skb->pkt_type = PACKET_HOST;
+					break;
+				}
+			}
+		}
+#else
+		for (idx = 0; idx < MAX_IF_NUM; idx++) {
+			dev = dst_port[idx];
+			if (dev && ether_addr_equal(eth->h_dest, dev->dev_addr) == 0) {
+				skb->pkt_type = PACKET_HOST;
+				break;
+			}
+		}
+#endif
+	}
+
+
+	if (debug_level >= 7)
+		pr_notice("%s, name = %s, vir_if_idx =%d, pkt_type:%d\n",
+			__func__, skb->dev->name, vir_if_idx, skb->pkt_type);
+
+	return 1;
+}
+
+uint32_t keep_alive_handler(struct sk_buff *skb, struct foe_entry *entry)
+{
+	struct ethhdr *eth = NULL;
+	u16 eth_type = ntohs(skb->protocol);
+	u32 vlan1_gap = 0;
+	u32 vlan2_gap = 0;
+	u32 pppoe_gap = 0;
+	struct vlan_hdr *vh;
+	struct iphdr *iph = NULL;
+	struct tcphdr *th = NULL;
+	struct udphdr *uh = NULL;
+
+	/* Try to recover the original SMAC/DMAC, but we don't have that
+	 * information: reuse the SMAC as DMAC, then turn the SMAC into a
+	 * multicast address so the bridge does not learn from this packet.
+	 */
+	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
+
+	hwnat_memcpy(eth->h_dest, eth->h_source, ETH_ALEN);
+	eth->h_source[0] = 0x1;	/* multicast SMAC keeps the bridge from learning it */
+	if (eth_type == ETH_P_8021Q) {
+		vlan1_gap = VLAN_HLEN;
+		vh = (struct vlan_hdr *)skb->data;
+
+		if (ntohs(vh->h_vlan_TCI) == wan_vid) {
+			/* make the packet look like it came from a LAN port */
+			vh->h_vlan_TCI = htons(lan_vid);
+		} else {
+			/* make the packet look like it came from the WAN port */
+			vh->h_vlan_TCI = htons(wan_vid);
+		}
+
+		if (ntohs(vh->h_vlan_encapsulated_proto) == ETH_P_PPP_SES) {
+			pppoe_gap = 8;
+		} else if (ntohs(vh->h_vlan_encapsulated_proto) == ETH_P_8021Q) {
+			vlan2_gap = VLAN_HLEN;
+			vh = (struct vlan_hdr *)(skb->data + VLAN_HLEN);
+
+			/* VLAN + VLAN + PPPoE */
+			if (ntohs(vh->h_vlan_encapsulated_proto) == ETH_P_PPP_SES) {
+				pppoe_gap = 8;
+			} else {
+				/* VLAN + VLAN + IP */
+				eth_type = ntohs(vh->h_vlan_encapsulated_proto);
+			}
+		} else {
+			/* VLAN + IP */
+			eth_type = ntohs(vh->h_vlan_encapsulated_proto);
+		}
+	}
+
+	/* Only Ipv4 NAT need KeepAlive Packet to refresh iptable */
+	if (eth_type == ETH_P_IP) {
+		iph = (struct iphdr *)(skb->data + vlan1_gap + vlan2_gap + pppoe_gap);
+		/* Recover to original layer 4 header */
+		if (iph->protocol == IPPROTO_TCP) {
+			th = (struct tcphdr *)((uint8_t *)iph + iph->ihl * 4);
+			foe_to_org_tcphdr(entry, iph, th);
+
+		} else if (iph->protocol == IPPROTO_UDP) {
+			uh = (struct udphdr *)((uint8_t *)iph + iph->ihl * 4);
+			foe_to_org_udphdr(entry, iph, uh);
+		}
+		/* Recover to original layer 3 header */
+		foe_to_org_iphdr(entry, iph);
+		skb->pkt_type = PACKET_HOST;
+	} else {
+		/* non-IPv4 frames also go up the stack */
+		skb->pkt_type = PACKET_HOST;
+	}
+	/* The Ethernet driver calls eth_type_trans(), which sets
+	 * skb->pkt_type = PACKET_OTHERHOST when the destination MAC is not
+	 * ours; forcing PACKET_HOST here lets the packet pass the ip_rcv()
+	 * check.
+	 */
+	return 1;
+}
+
+uint32_t keep_alive_old_pkt_handler(struct sk_buff *skb)
+{
+	struct ethhdr *eth = NULL;
+	u16 vir_if_idx = 0;
+	struct net_device *dev;
+
+	if (skb == NULL) {
+		if (debug_level >= 7)
+			pr_notice("%s skb == NULL\n", __func__);
+		return 1;
+	}
+
+	if ((FOE_SP(skb) == 0) || (FOE_SP(skb) == 5)) {
+
+		vir_if_idx = remove_vlan_tag(skb);
+
+		/* recover to right incoming interface */
+		if (vir_if_idx < MAX_IF_NUM && dst_port[vir_if_idx]) {
+			skb->dev = dst_port[vir_if_idx];
+		} else {
+			pr_notice("%s unknown If (vir_if_idx=%d)\n", __func__, vir_if_idx);
+			return 1;
+		}
+	}
+
+
+	eth = (struct ethhdr *)skb_mac_header(skb);
+
+	if (eth->h_dest[0] & 1) {
+		if (ether_addr_equal(eth->h_dest, skb->dev->broadcast) == 0)
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	} else {
+		skb->pkt_type = PACKET_OTHERHOST;
+		for (vir_if_idx = 0; vir_if_idx < MAX_IF_NUM; vir_if_idx++) {
+			dev = dst_port[vir_if_idx];
+			if (dev && ether_addr_equal(eth->h_dest, dev->dev_addr) == 0) {
+				skb->pkt_type = PACKET_HOST;
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
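+/*
+ * hitbind_force_to_cpu_handler() - a bound flow was trapped to the CPU;
+ * re-inject the skb on the destination port recorded in the FOE entry
+ * (act_dp). On EDMA platforms the AQR net devices are resolved by name
+ * once and cached in aqr_dev1/aqr_dev2.
+ */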
+int hitbind_force_to_cpu_handler(struct sk_buff *skb, struct foe_entry *entry)
+{
+	uint32_t act_dp = 0;
+#ifdef CONFIG_RAETH_EDMA
+	struct net_device *aqr_dev;
+#endif
+	if (skb == NULL) {
+		if (debug_level >= 7)
+			pr_notice("%s, skb == NULL\n", __func__);
+		return 1;
+	}
+
+	if (debug_level >= 10)
+		pr_notice("%s, FOE_AI(skb):0x%x, FOE_SP(skb):%d\n", __func__, FOE_AI(skb), FOE_SP(skb));
+
+	act_dp = get_act_dp(entry);
+	skb->dev = dst_port[act_dp];
+
+#ifdef CONFIG_RAETH_EDMA
+
+	if (FOE_SP(skb) == MDMA_PSE_PORT) {
+		/* MD loopback, scenario: AQR0 <=> MD force bind */
+
+		if (debug_level >= 7)
+			pr_notice("[HS-ethernet/HWNAT/RX-bind] md loopback\n");
+
+		if (aqr_dev1 != NULL) {
+			aqr_dev = aqr_dev1;
+		} else {
+			aqr_dev1 = ra_dev_get_by_name(AQR_DEV_NAME);
+			aqr_dev = aqr_dev1;
+		}
+		skb->dev = aqr_dev;
+
+	} else {
+
+		if (strcmp(dst_port[entry->ipv4_hnapt.act_dp]->name, DEV_NAME) == 0) {
+			if (debug_level >= 7)
+				pr_notice("[HS-ethernet/HWNAT/RX-bind] hitbind_force_to_cpu_handler aqr0\n");
+
+			if (aqr_dev1 != NULL) {
+				aqr_dev = aqr_dev1;
+			} else {
+				aqr_dev1 = ra_dev_get_by_name(AQR_DEV_NAME);
+				aqr_dev = aqr_dev1;
+			}
+			skb->dev = aqr_dev;
+		} else if (strcmp(dst_port[entry->ipv4_hnapt.act_dp]->name, DEV2_NAME) == 0) {
+			if (debug_level >= 7)
+				pr_notice("[HS-ethernet/HWNAT/RX-bind] hitbind_force_to_cpu_handler aqr1\n");
+
+			if (aqr_dev2 != NULL) {
+				aqr_dev = aqr_dev2;
+			} else {
+				aqr_dev2 = ra_dev_get_by_name(AQR_DEV2_NAME);
+				aqr_dev = aqr_dev2;
+			}
+			skb->dev = aqr_dev;
+		}
+	}
+#endif
+
+	/* interface is unknown */
+	if (!skb->dev) {
+		if (debug_level >= 1)
+			pr_notice("%s, interface is unknown\n", __func__);
+		kfree_skb(skb);
+		return 0;
+	}
+	skb_set_network_header(skb, 0);
+	skb_push(skb, ETH_HLEN);	/* pointer to layer2 header */
+
+	if (debug_level >= 7)
+		pr_notice("%s, bind to cpu done if name = %s\n", __func__, skb->dev->name);
+
+	dev_queue_xmit(skb);
+	return 0;
+}
+
+int hitbind_force_mcast_to_wifi_handler(struct sk_buff *skb)
+{
+	//int i = 0;
+	//struct sk_buff *skb2;
+#if 0
+	if (fe_feature & WIFI_HNAT) {
+		if (!(fe_feature & GE2_SUPPORT))
+			remove_vlan_tag(skb);	/* pointer to layer3 header */
+		/*if we only use GMAC1, we need to use vlan id to identify LAN/WAN port*/
+		/*otherwise, CPU send untag packet to switch so we don't need to*/
+		/*remove vlan tag before sending to WiFi interface*/
+
+		skb_set_network_header(skb, 0);
+		skb_push(skb, ETH_HLEN);	/* pointer to layer2 header */
+
+		for (i = 0; i < MAX_IF_NUM; i++) {
+			if ((strncmp(dst_port[i]->name, "eth", 3) != 0)) {
+				skb2 = skb_clone(skb, GFP_ATOMIC);
+
+				if (!skb2)
+					return -ENOMEM;
+
+				skb2->dev = dst_port[i];
+				dev_queue_xmit(skb2);
+			}
+		}
+	}
+	kfree_skb(skb);
+#endif
+
+	/* dvt test */
+	if (debug_level >= 8)
+		pr_notice("multicast to CPU\n");
+	return 0;
+}
+
+void get_cpu_reason_entry(int cpu_reason, struct sk_buff *skb)
+{
+	if (FOE_AI(skb) == cpu_reason)
+		hwnat_dbg_entry = FOE_ENTRY_NUM(skb);
+}
+
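+/*
+ * get_pppoe_sid() - sanity-check the PPPoE session header (VER and TYPE
+ * must both be 1, payload PPP_IP or - with HNAT_IPV6 - PPP_IPV6) and
+ * return the session id, still in network byte order. Returns 0 on
+ * success, 1 for an unsupported header.
+ */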
+int32_t get_pppoe_sid(struct sk_buff *skb, uint32_t vlan_gap, u16 *sid, uint16_t *ppp_tag)
+{
+	struct pppoe_hdr *peh = NULL;
+
+	peh = (struct pppoe_hdr *)(skb->data + ETH_HLEN + vlan_gap);
+
+	if (debug_level >= 6) {
+		NAT_PRINT("\n==============\n");
+		NAT_PRINT(" Ver=%d\n", peh->ver);
+		NAT_PRINT(" Type=%d\n", peh->type);
+		NAT_PRINT(" Code=%d\n", peh->code);
+		NAT_PRINT(" sid=%x\n", ntohs(peh->sid));
+		NAT_PRINT(" Len=%d\n", ntohs(peh->length));
+		NAT_PRINT(" tag_type=%x\n", ntohs(peh->tag[0].tag_type));
+		NAT_PRINT(" tag_len=%d\n", ntohs(peh->tag[0].tag_len));
+		NAT_PRINT("=================\n");
+	}
+
+	*ppp_tag = peh->tag[0].tag_type;
+	if (fe_feature & HNAT_IPV6) {
+		if (peh->ver != 1 || peh->type != 1 ||
+		    (*ppp_tag != htons(PPP_IP) &&
+		     *ppp_tag != htons(PPP_IPV6))) {
+			return 1;
+		}
+	} else {
+		if (peh->ver != 1 || peh->type != 1 || *ppp_tag != htons(PPP_IP))
+			return 1;
+	}
+
+	*sid = peh->sid;
+	return 0;
+}
+
+/* HNAT_V2 can push special tag */
+int32_t is_special_tag(u16 eth_type, struct pkt_parse_result *ppe_parse_result)
+{
+	/* Please modify this function to speed up the packet with special tag
+	 * Ex:
+	 *    Ralink switch = 0x81xx
+	 *    Realtek switch = 0x8899
+	 */
+	if ((eth_type & 0x00FF) == htons(ETH_P_8021Q)) {	/* Ralink Special Tag: 0x81xx */
+		ppe_parse_result->vlan_tag = eth_type;
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+int32_t is8021Q(u16 eth_type, struct pkt_parse_result *ppe_parse_result)
+{
+	if (eth_type == htons(ETH_P_8021Q)) {
+		ppe_parse_result->vlan_tag = eth_type;
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+int32_t is_hw_vlan_tx(struct sk_buff *skb, struct pkt_parse_result *ppe_parse_result)
+{
+#ifdef CONFIG_RAETH_HW_VLAN_TX
+	if (skb_vlan_tag_present(skb)) {
+		ppe_parse_result->vlan_tag = htons(ETH_P_8021Q);
+		return 1;
+	} else {
+		return 0;
+	}
+#else
+	return 0;
+#endif
+}
+
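+/*
+ * ppe_parse_layer_med() - parse a packet on the modem (MD) TX path.
+ * No Ethernet header is present here, so the network header starts at
+ * offset 0 and fixed placeholder MACs are used; only the L3/L4 headers
+ * are copied into ppe_parse_result.
+ */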
+int32_t ppe_parse_layer_med(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct tcphdr *th = NULL;
+	struct udphdr *uh = NULL;
+	u8 ipv6_head_len = 0;
+
+	/* dvt use */
+	USE_3T_UDP_FRAG = 1;
+
+	memset(ppe_parse_result, 0, sizeof(*ppe_parse_result));
+	//hwnat_memcpy(ppe_parse_result->dmac, eth->h_dest, ETH_ALEN);
+	//hwnat_memcpy(ppe_parse_result->smac, eth->h_source, ETH_ALEN);
+	ppe_parse_result->dmac[0] = 0x00;
+	ppe_parse_result->dmac[1] = 0x00;
+	ppe_parse_result->dmac[2] = 0x00;
+	ppe_parse_result->dmac[3] = 0x01;
+	ppe_parse_result->dmac[4] = 0x00;
+	ppe_parse_result->dmac[5] = 0x00;
+
+	ppe_parse_result->smac[0] = 0x00;
+	ppe_parse_result->smac[1] = 0x00;
+	ppe_parse_result->smac[2] = 0x00;
+	ppe_parse_result->smac[3] = 0x01;
+	ppe_parse_result->smac[4] = 0x00;
+	ppe_parse_result->smac[5] = 0x00;
+
+	/* we cannot speed up multicast packets because both wired and wireless PCs might join the same multicast group */
+	ppe_parse_result->is_mcast = 0;
+	ppe_parse_result->vlan_layer = 0;
+
+	/* no layer2 header on this path; set layer3 start addr at offset 0 */
+	skb_set_network_header(skb, 0);
+
+	iph = (struct iphdr *)skb_network_header(skb);
+	memcpy(&ppe_parse_result->iph, iph, sizeof(struct iphdr));
+
+	if (iph->version == 4) {
+		if (iph->protocol == IPPROTO_TCP) {
+			if (debug_level >= 6)
+				pr_notice("MD TX TCP!!!!!\n");
+			skb_set_transport_header(skb, (iph->ihl * 4));
+			th = (struct tcphdr *)skb_transport_header(skb);
+
+			memcpy(&ppe_parse_result->th, th, sizeof(struct tcphdr));
+
+			if (entry->ipv4_hnapt.bfib1.pkt_type == IPV6_5T_ROUTE)
+				ppe_parse_result->pkt_type = IPV6_6RD;
+			else
+				ppe_parse_result->pkt_type = IPV4_HNAPT;
+
+			if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+				if (debug_level >= 6)
+					DD;
+				return 1;
+			}
+		} else if (iph->protocol == IPPROTO_UDP) {
+			if (debug_level >= 6)
+				pr_notice("MD TX UDP!!!!!\n");
+			skb_set_transport_header(skb, (iph->ihl * 4));
+			uh = (struct udphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_result->uh, uh, sizeof(struct udphdr));
+
+			if (entry->ipv4_hnapt.bfib1.pkt_type == IPV6_5T_ROUTE)
+				ppe_parse_result->pkt_type = IPV6_6RD;
+			else
+				ppe_parse_result->pkt_type = IPV4_HNAPT;
+
+			if ((iph->frag_off & htons(IP_MF | IP_OFFSET)) &&
+			    USE_3T_UDP_FRAG == 0)
+				return 1;
+		} else if (iph->protocol == IPPROTO_GRE) {
+			/* GRE is not accelerated on this path */
+			if (debug_level >= 6)
+				DD;
+			return 1;
+		}
+	} else {
+		ip6h = (struct ipv6hdr *)skb_network_header(skb);
+
+		if (ip6h->version == 6) {
+			memcpy(&ppe_parse_result->ip6h, ip6h, sizeof(struct ipv6hdr));
+
+			if (ip6h->nexthdr == NEXTHDR_TCP) {
+				if (debug_level >= 6)
+					pr_notice("ipv6 + TCP\n");
+				skb_set_transport_header(skb, (sizeof(struct ipv6hdr)));
+				th = (struct tcphdr *)skb_transport_header(skb);
+				memcpy(&ppe_parse_result->th, th, sizeof(struct tcphdr));
+
+
+				if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_HNAPT) {
+					ppe_parse_result->pkt_type = IPV4_DSLITE;
+					if (xlat_enable == 1)
+						return 1;
+				} else
+					ppe_parse_result->pkt_type = IPV6_5T_ROUTE;
+
+			} else if (ip6h->nexthdr == NEXTHDR_UDP) {
+				if (debug_level >= 6)
+					pr_notice("ipv6 + UDP\n");
+				skb_set_transport_header(skb, (sizeof(struct ipv6hdr)));
+				uh = (struct udphdr *)skb_transport_header(skb);
+				memcpy(&ppe_parse_result->uh, uh, sizeof(struct udphdr));
+
+
+				if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_HNAPT) {
+					ppe_parse_result->pkt_type = IPV4_DSLITE;
+					if (xlat_enable == 1)
+						return 1;
+				} else
+					ppe_parse_result->pkt_type = IPV6_5T_ROUTE;
+
+			} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
+				/* the inner IPv4 header immediately follows the IPv6 header;
+				 * note the byte-wise (not struct-sized) pointer arithmetic
+				 */
+				ipv6_head_len = sizeof(struct ipv6hdr);
+				memcpy(&ppe_parse_result->iph,
+				       (uint8_t *)ip6h + ipv6_head_len,
+				       sizeof(struct iphdr));
+				ppe_parse_result->pkt_type = IPV4_DSLITE;
+			} else {
+				ppe_parse_result->pkt_type = IPV6_3T_ROUTE;
+			}
+		} else {
+			if (debug_level >= 6)
+				pr_notice("unsupported IP version = %x\n", ip6h->version);
+		}
+	}
+
+	if (debug_level >= 6) {
+		pr_notice("--------------\n");
+		pr_notice("DMAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+			ppe_parse_result->dmac[0], ppe_parse_result->dmac[1],
+			 ppe_parse_result->dmac[2], ppe_parse_result->dmac[3],
+			 ppe_parse_result->dmac[4], ppe_parse_result->dmac[5]);
+		pr_notice("SMAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+			ppe_parse_result->smac[0], ppe_parse_result->smac[1],
+			 ppe_parse_result->smac[2], ppe_parse_result->smac[3],
+			 ppe_parse_result->smac[4], ppe_parse_result->smac[5]);
+		pr_notice("Eth_Type=%x\n", ppe_parse_result->eth_type);
+		if (ppe_parse_result->vlan1_gap > 0)
+			pr_notice("VLAN1 ID=%x\n", ntohs(ppe_parse_result->vlan1));
+
+		if (ppe_parse_result->vlan2_gap > 0)
+			pr_notice("VLAN2 ID=%x\n", ntohs(ppe_parse_result->vlan2));
+
+		if (ppe_parse_result->pppoe_gap > 0) {
+			pr_notice("PPPOE Session ID=%x\n", ppe_parse_result->pppoe_sid);
+			pr_notice("PPP Tag=%x\n", ntohs(ppe_parse_result->ppp_tag));
+		}
+		pr_notice("PKT_TYPE=%s\n",
+			ppe_parse_result->pkt_type ==
+			 0 ? "IPV4_HNAPT" : ppe_parse_result->pkt_type ==
+			 1 ? "IPV4_HNAT" : ppe_parse_result->pkt_type ==
+			 3 ? "IPV4_DSLITE" : ppe_parse_result->pkt_type ==
+			 5 ? "IPV6_ROUTE" : ppe_parse_result->pkt_type == 7 ? "IPV6_6RD" : "Unknown");
+		if (ppe_parse_result->pkt_type == IPV4_HNAT) {
+			pr_notice("SIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.saddr)));
+			pr_notice("DIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.daddr)));
+			pr_notice("TOS=%x\n", ntohs(ppe_parse_result->iph.tos));
+		} else if (ppe_parse_result->pkt_type == IPV4_HNAPT) {
+			pr_notice("SIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.saddr)));
+			pr_notice("DIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.daddr)));
+			pr_notice("TOS=%x\n", ntohs(ppe_parse_result->iph.tos));
+
+			if (ppe_parse_result->iph.protocol == IPPROTO_TCP) {
+				pr_notice("TCP SPORT=%d\n", ntohs(ppe_parse_result->th.source));
+				pr_notice("TCP DPORT=%d\n", ntohs(ppe_parse_result->th.dest));
+			} else if (ppe_parse_result->iph.protocol == IPPROTO_UDP) {
+				pr_notice("UDP SPORT=%d\n", ntohs(ppe_parse_result->uh.source));
+				pr_notice("UDP DPORT=%d\n", ntohs(ppe_parse_result->uh.dest));
+			}
+		} else if (ppe_parse_result->pkt_type == IPV6_5T_ROUTE) {
+			pr_notice("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+				ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[0]),
+			     ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[1]),
+			     ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[2]),
+			     ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[3]),
+			     ntohs(ppe_parse_result->th.source),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[0]),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[1]),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[2]),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[3]),
+			     ntohs(ppe_parse_result->th.dest));
+		} else if (ppe_parse_result->pkt_type == IPV6_6RD) {
+			/* fill in ipv4 6rd entry */
+			pr_notice("packet_type = IPV6_6RD\n");
+			pr_notice("SIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.saddr)));
+			pr_notice("DIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.daddr)));
+
+			pr_notice("Checksum=%x\n", ntohs(ppe_parse_result->iph.check));
+			pr_notice("ipV4 ID =%x\n", ntohs(ppe_parse_result->iph.id));
+			pr_notice("Flag=%x\n", ntohs(ppe_parse_result->iph.frag_off) >> 13);
+			pr_notice("TTL=%x\n", ppe_parse_result->iph.ttl);
+			pr_notice("TOS=%x\n", ppe_parse_result->iph.tos);
+		}
+	}
+
+	return 0;
+}
+
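+/*
+ * ppe_parse_layer_info() - walk the frame layer by layer (up to three
+ * VLAN tags, optional PPPoE) recording the header gaps, copy the
+ * L2/L3/L4 headers into ppe_parse_result, and classify the flow as
+ * IPV4_HNAPT/IPV4_DSLITE/IPV4_MAP_E/IPV6_5T_ROUTE/IPV6_6RD/...
+ * Returns 0 when the packet is a candidate for acceleration.
+ */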
+int32_t ppe_parse_layer_info(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	struct vlan_hdr *vh = NULL;
+	struct ethhdr *eth = NULL;
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *ip6h = NULL;
+	struct tcphdr *th = NULL;
+	struct udphdr *uh = NULL;
+	u8 ipv6_head_len = 0;
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+	struct vlan_hdr pseudo_vhdr;
+#endif
+	/* dvt use */
+	USE_3T_UDP_FRAG = 1;
+
+	memset(ppe_parse_result, 0, sizeof(*ppe_parse_result));
+	eth = (struct ethhdr *)skb->data;
+	hwnat_memcpy(ppe_parse_result->dmac, eth->h_dest, ETH_ALEN);
+	hwnat_memcpy(ppe_parse_result->smac, eth->h_source, ETH_ALEN);
+	ppe_parse_result->eth_type = eth->h_proto;
+	/* we cannot speed up multicast packets because both wired and wireless PCs might join the same multicast group */
+	if (fe_feature & HNAT_MCAST) {
+		if (is_multicast_ether_addr(&eth->h_dest[0]))
+			ppe_parse_result->is_mcast = 1;
+		else
+			ppe_parse_result->is_mcast = 0;
+	} else {
+		if (is_multicast_ether_addr(&eth->h_dest[0])) {
+			if (debug_level >= 6)
+				DD;
+			return 1;
+		}
+	}
+
+	if (is8021Q(ppe_parse_result->eth_type, ppe_parse_result) ||
+	    is_special_tag(ppe_parse_result->eth_type, ppe_parse_result) ||
+	    is_hw_vlan_tx(skb, ppe_parse_result)) {
+
+#ifdef CONFIG_RAETH_HW_VLAN_TX
+		ppe_parse_result->vlan1_gap = 0;
+		ppe_parse_result->vlan_layer++;
+		pseudo_vhdr.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+		pseudo_vhdr.h_vlan_encapsulated_proto = eth->h_proto;
+		vh = (struct vlan_hdr *)&pseudo_vhdr;
+#else
+		ppe_parse_result->vlan1_gap = VLAN_HLEN;
+		ppe_parse_result->vlan_layer++;
+		vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+#endif
+		ppe_parse_result->vlan1 = vh->h_vlan_TCI;
+		/* VLAN + PPPoE */
+		if (ntohs(vh->h_vlan_encapsulated_proto) == ETH_P_PPP_SES) {
+			ppe_parse_result->pppoe_gap = 8;
+			if (get_pppoe_sid(skb, ppe_parse_result->vlan1_gap,
+					  &ppe_parse_result->pppoe_sid,
+					  &ppe_parse_result->ppp_tag)) {
+				if (debug_level >= 6)
+					DD;
+				return 1;
+			}
+			ppe_parse_result->eth_type = vh->h_vlan_encapsulated_proto;
+			/* Double VLAN = VLAN + VLAN */
+		} else if (is8021Q(vh->h_vlan_encapsulated_proto, ppe_parse_result) ||
+			   is_special_tag(vh->h_vlan_encapsulated_proto, ppe_parse_result)) {
+			ppe_parse_result->vlan2_gap = VLAN_HLEN;
+			ppe_parse_result->vlan_layer++;
+			vh = (struct vlan_hdr *)(skb->data + ETH_HLEN + ppe_parse_result->vlan1_gap);
+			ppe_parse_result->vlan2 = vh->h_vlan_TCI;
+
+			/* VLAN + VLAN + PPPoE */
+			if (ntohs(vh->h_vlan_encapsulated_proto) == ETH_P_PPP_SES) {
+				ppe_parse_result->pppoe_gap = 8;
+				if (get_pppoe_sid
+				    (skb,
+				     (ppe_parse_result->vlan1_gap + ppe_parse_result->vlan2_gap),
+				     &ppe_parse_result->pppoe_sid, &ppe_parse_result->ppp_tag)) {
+					if (debug_level >= 6)
+						DD;
+					return 1;
+				}
+				ppe_parse_result->eth_type = vh->h_vlan_encapsulated_proto;
+			} else if (is8021Q(vh->h_vlan_encapsulated_proto, ppe_parse_result)) {
+				/* VLAN + VLAN + VLAN */
+				ppe_parse_result->vlan_layer++;
+				vh = (struct vlan_hdr *)(skb->data + ETH_HLEN +
+							 ppe_parse_result->vlan1_gap + VLAN_HLEN);
+
+				/* VLAN + VLAN + VLAN */
+				if (is8021Q(vh->h_vlan_encapsulated_proto, ppe_parse_result))
+					ppe_parse_result->vlan_layer++;
+			} else {
+				/* VLAN + VLAN + IP */
+				ppe_parse_result->eth_type = vh->h_vlan_encapsulated_proto;
+			}
+		} else {
+			/* VLAN + IP */
+			ppe_parse_result->eth_type = vh->h_vlan_encapsulated_proto;
+		}
+	} else if (ntohs(ppe_parse_result->eth_type) == ETH_P_PPP_SES) {
+		/* PPPoE + IP */
+		ppe_parse_result->pppoe_gap = 8;
+		if (get_pppoe_sid(skb, ppe_parse_result->vlan1_gap,
+				  &ppe_parse_result->pppoe_sid,
+				  &ppe_parse_result->ppp_tag)) {
+			if (debug_level >= 6)
+				DD;
+			return 1;
+		}
+	}
+	/* set layer2 start addr */
+
+	skb_set_mac_header(skb, 0);
+
+	/* set layer3 start addr */
+
+	skb_set_network_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+			       ppe_parse_result->vlan2_gap + ppe_parse_result->pppoe_gap);
+
+	/* set layer4 start addr */
+	if ((ppe_parse_result->eth_type == htons(ETH_P_IP)) ||
+	    (ppe_parse_result->eth_type == htons(ETH_P_PPP_SES) &&
+	    (ppe_parse_result->ppp_tag == htons(PPP_IP)))) {
+		iph = (struct iphdr *)skb_network_header(skb);
+		memcpy(&ppe_parse_result->iph, iph, sizeof(struct iphdr));
+
+		if (iph->protocol == IPPROTO_TCP) {
+			skb_set_transport_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+						 ppe_parse_result->vlan2_gap +
+						 ppe_parse_result->pppoe_gap + (iph->ihl * 4));
+			th = (struct tcphdr *)skb_transport_header(skb);
+
+			memcpy(&ppe_parse_result->th, th, sizeof(struct tcphdr));
+
+			if (entry->ipv4_hnapt.bfib1.pkt_type == IPV6_5T_ROUTE)
+				ppe_parse_result->pkt_type = IPV6_6RD;
+
+			else
+				ppe_parse_result->pkt_type = IPV4_HNAPT;
+
+			if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+				if (debug_level >= 6)
+					DD;
+				return 1;
+			}
+		} else if (iph->protocol == IPPROTO_UDP) {
+			skb_set_transport_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+						 ppe_parse_result->vlan2_gap +
+						 ppe_parse_result->pppoe_gap + (iph->ihl * 4));
+			uh = (struct udphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_result->uh, uh, sizeof(struct udphdr));
+
+			if (entry->ipv4_hnapt.bfib1.pkt_type == IPV6_5T_ROUTE)
+				ppe_parse_result->pkt_type = IPV6_6RD;
+			else
+				ppe_parse_result->pkt_type = IPV4_HNAPT;
+
+			if ((iph->frag_off & htons(IP_MF | IP_OFFSET)) &&
+			    USE_3T_UDP_FRAG == 0) {
+				if (debug_level >= 6)
+					DD;
+				return 1;
+			}
+		} else if (iph->protocol == IPPROTO_GRE) {
+			/* GRE is not accelerated */
+			if (debug_level >= 6)
+				DD;
+			return 1;
+		}
+		if (fe_feature & HNAT_IPV6) {
+			if (iph->protocol == IPPROTO_IPV6) {
+				ip6h = (struct ipv6hdr *)((uint8_t *)iph + iph->ihl * 4);
+				memcpy(&ppe_parse_result->ip6h, ip6h, sizeof(struct ipv6hdr));
+
+				if (ip6h->nexthdr == NEXTHDR_TCP) {
+					skb_set_transport_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+								 ppe_parse_result->vlan2_gap +
+								 ppe_parse_result->pppoe_gap +
+								 (sizeof(struct ipv6hdr)));
+
+					th = (struct tcphdr *)skb_transport_header(skb);
+
+					memcpy(&ppe_parse_result->th.source, &th->source, sizeof(th->source));
+					memcpy(&ppe_parse_result->th.dest, &th->dest, sizeof(th->dest));
+				} else if (ip6h->nexthdr == NEXTHDR_UDP) {
+					skb_set_transport_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+								 ppe_parse_result->vlan2_gap +
+								 ppe_parse_result->pppoe_gap +
+								 (sizeof(struct ipv6hdr)));
+
+					uh = (struct udphdr *)skb_transport_header(skb);
+					memcpy(&ppe_parse_result->uh.source, &uh->source, sizeof(uh->source));
+					memcpy(&ppe_parse_result->uh.dest, &uh->dest, sizeof(uh->dest));
+				}
+				ppe_parse_result->pkt_type = IPV6_6RD;
+
+	/* The identification field in the outer IPv4 header is zero */
+	/* after entering binding state; */
+	/* some 6RD relay routers will drop such packets. */
+			}
+		}
+		if ((iph->protocol != IPPROTO_TCP) && (iph->protocol != IPPROTO_UDP) &&
+		    (iph->protocol != IPPROTO_GRE) && (iph->protocol != IPPROTO_IPV6)) {
+			if (debug_level >= 6)
+				DD;
+			return 1;
+		}
+/* Packet format is not supported */
+	} else if (ppe_parse_result->eth_type == htons(ETH_P_IPV6) ||
+		   (ppe_parse_result->eth_type == htons(ETH_P_PPP_SES) &&
+		    ppe_parse_result->ppp_tag == htons(PPP_IPV6))) {
+		ip6h = (struct ipv6hdr *)skb_network_header(skb);
+		memcpy(&ppe_parse_result->ip6h, ip6h, sizeof(struct ipv6hdr));
+
+		if (ip6h->nexthdr == NEXTHDR_TCP) {
+			skb_set_transport_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+						 ppe_parse_result->vlan2_gap +
+						 ppe_parse_result->pppoe_gap +
+						 (sizeof(struct ipv6hdr)));
+
+			th = (struct tcphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_result->th, th, sizeof(struct tcphdr));
+
+			if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_HNAPT) {
+				ppe_parse_result->pkt_type = IPV4_DSLITE;
+				if (xlat_enable == 1)
+					return 1;
+			} else
+				ppe_parse_result->pkt_type = IPV6_5T_ROUTE;
+
+
+		} else if (ip6h->nexthdr == NEXTHDR_UDP) {
+			skb_set_transport_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+						 ppe_parse_result->vlan2_gap +
+						 ppe_parse_result->pppoe_gap +
+						 (sizeof(struct ipv6hdr)));
+			uh = (struct udphdr *)skb_transport_header(skb);
+			memcpy(&ppe_parse_result->uh, uh, sizeof(struct udphdr));
+
+			if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_HNAPT) {
+				ppe_parse_result->pkt_type = IPV4_DSLITE;
+				if (xlat_enable == 1)
+					return 1;
+			} else
+				ppe_parse_result->pkt_type = IPV6_5T_ROUTE;
+
+		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
+			skb_set_transport_header(skb, ETH_HLEN + ppe_parse_result->vlan1_gap +
+						 ppe_parse_result->vlan2_gap +
+						 ppe_parse_result->pppoe_gap +
+						 sizeof(struct ipv6hdr) +
+						 sizeof(struct iphdr));
+			/* the inner IPv4 header immediately follows the IPv6 header;
+			 * note the byte-wise (not struct-sized) pointer arithmetic
+			 */
+			ipv6_head_len = sizeof(struct ipv6hdr);
+			memcpy(&ppe_parse_result->iph,
+			       (uint8_t *)ip6h + ipv6_head_len,
+			       sizeof(struct iphdr));
+
+			if (SwitchDslMape == 1) {
+				if (ppe_parse_result->iph.protocol == IPPROTO_TCP) {
+					th = (struct tcphdr *)skb_transport_header(skb);
+					memcpy(&ppe_parse_result->th, th, sizeof(struct tcphdr));
+					if (ppe_parse_result->iph.frag_off & htons(IP_MF | IP_OFFSET))
+						return 1;
+				} else if (ppe_parse_result->iph.protocol == IPPROTO_UDP) {
+					uh = (struct udphdr *)skb_transport_header(skb);
+					memcpy(&ppe_parse_result->uh, uh, sizeof(struct udphdr));
+					if (ppe_parse_result->iph.frag_off & htons(IP_MF | IP_OFFSET))
+						return 1;
+				}
+				ppe_parse_result->pkt_type = IPV4_MAP_E;
+			} else {
+				ppe_parse_result->pkt_type = IPV4_DSLITE;
+			}
+
+		} else {
+			ppe_parse_result->pkt_type = IPV6_3T_ROUTE;
+		}
+
+	} else {
+		if (debug_level >= 6)
+			DD;
+		return 1;
+	}
+
+	if (debug_level >= 6) {
+		pr_notice("--------------\n");
+		pr_notice("DMAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+			ppe_parse_result->dmac[0], ppe_parse_result->dmac[1],
+			 ppe_parse_result->dmac[2], ppe_parse_result->dmac[3],
+			 ppe_parse_result->dmac[4], ppe_parse_result->dmac[5]);
+		pr_notice("SMAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+			ppe_parse_result->smac[0], ppe_parse_result->smac[1],
+			 ppe_parse_result->smac[2], ppe_parse_result->smac[3],
+			 ppe_parse_result->smac[4], ppe_parse_result->smac[5]);
+		pr_notice("Eth_Type=%x\n", ppe_parse_result->eth_type);
+		if (ppe_parse_result->vlan1_gap > 0)
+			pr_notice("VLAN1 ID=%x\n", ntohs(ppe_parse_result->vlan1));
+
+		if (ppe_parse_result->vlan2_gap > 0)
+			pr_notice("VLAN2 ID=%x\n", ntohs(ppe_parse_result->vlan2));
+
+		if (ppe_parse_result->pppoe_gap > 0) {
+			pr_notice("PPPOE Session ID=%x\n", ppe_parse_result->pppoe_sid);
+			pr_notice("PPP Tag=%x\n", ntohs(ppe_parse_result->ppp_tag));
+		}
+		pr_notice("PKT_TYPE=%s\n",
+			ppe_parse_result->pkt_type ==
+			 0 ? "IPV4_HNAPT" : ppe_parse_result->pkt_type ==
+			 1 ? "IPV4_HNAT" : ppe_parse_result->pkt_type ==
+			 3 ? "IPV4_DSLITE" : ppe_parse_result->pkt_type ==
+			 5 ? "IPV6_ROUTE" : ppe_parse_result->pkt_type == 7 ? "IPV6_6RD" : "Unknown");
+		if (ppe_parse_result->pkt_type == IPV4_HNAT) {
+			pr_notice("SIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.saddr)));
+			pr_notice("DIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.daddr)));
+			pr_notice("TOS=%x\n", ntohs(ppe_parse_result->iph.tos));
+		} else if (ppe_parse_result->pkt_type == IPV4_HNAPT) {
+			pr_notice("SIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.saddr)));
+			pr_notice("DIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.daddr)));
+			pr_notice("TOS=%x\n", ntohs(ppe_parse_result->iph.tos));
+
+			if (ppe_parse_result->iph.protocol == IPPROTO_TCP) {
+				pr_notice("TCP SPORT=%d\n", ntohs(ppe_parse_result->th.source));
+				pr_notice("TCP DPORT=%d\n", ntohs(ppe_parse_result->th.dest));
+			} else if (ppe_parse_result->iph.protocol == IPPROTO_UDP) {
+				pr_notice("UDP SPORT=%d\n", ntohs(ppe_parse_result->uh.source));
+				pr_notice("UDP DPORT=%d\n", ntohs(ppe_parse_result->uh.dest));
+			}
+		} else if (ppe_parse_result->pkt_type == IPV6_5T_ROUTE) {
+			pr_notice("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
+				ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[0]),
+			     ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[1]),
+			     ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[2]),
+			     ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[3]),
+			     ntohs(ppe_parse_result->th.source),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[0]),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[1]),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[2]),
+			     ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[3]),
+			     ntohs(ppe_parse_result->th.dest));
+		} else if (ppe_parse_result->pkt_type == IPV6_6RD) {
+			/* fill in ipv4 6rd entry */
+			pr_notice("packet_type = IPV6_6RD\n");
+			pr_notice("SIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.saddr)));
+			pr_notice("DIP=%s\n", ip_to_str(ntohl(ppe_parse_result->iph.daddr)));
+
+			pr_notice("Checksum=%x\n", ntohs(ppe_parse_result->iph.check));
+			pr_notice("ipV4 ID =%x\n", ntohs(ppe_parse_result->iph.id));
+			pr_notice("Flag=%x\n", ntohs(ppe_parse_result->iph.frag_off) >> 13);
+			pr_notice("TTL=%x\n", ppe_parse_result->iph.ttl);
+			pr_notice("TOS=%x\n", ppe_parse_result->iph.tos);
+		}
+	}
+
+	return 0;
+}
+
+int32_t ppe_fill_L2_info(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	/* if this entry is already in binding state, skip it */
+	if (entry->bfib1.state == BIND)
+		return 1;
+
+	/* Set VLAN Info - VLAN1/VLAN2 */
+	/* Set Layer2 Info - DMAC, SMAC */
+	if ((ppe_parse_result->pkt_type == IPV4_HNAT) || (ppe_parse_result->pkt_type == IPV4_HNAPT)) {
+		if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE || 
+		    entry->ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {/* DS-Lite WAN->LAN */
+			if (fe_feature & HNAT_IPV6) {
+				foe_set_mac_hi_info(entry->ipv4_dslite.dmac_hi, ppe_parse_result->dmac);
+				foe_set_mac_lo_info(entry->ipv4_dslite.dmac_lo, ppe_parse_result->dmac);
+				foe_set_mac_hi_info(entry->ipv4_dslite.smac_hi, ppe_parse_result->smac);
+				foe_set_mac_lo_info(entry->ipv4_dslite.smac_lo, ppe_parse_result->smac);
+				entry->ipv4_dslite.vlan1 = ntohs(ppe_parse_result->vlan1);
+				entry->ipv4_dslite.pppoe_id = ntohs(ppe_parse_result->pppoe_sid);
+				entry->ipv4_dslite.vlan2_winfo = ntohs(ppe_parse_result->vlan2);
+
+				entry->ipv4_dslite.etype = ntohs(ppe_parse_result->vlan_tag);
+			} else {
+				return 1;
+			}
+
+		} else {	/* IPv4 WAN<->LAN */
+			foe_set_mac_hi_info(entry->ipv4_hnapt.dmac_hi, ppe_parse_result->dmac);
+			foe_set_mac_lo_info(entry->ipv4_hnapt.dmac_lo, ppe_parse_result->dmac);
+			foe_set_mac_hi_info(entry->ipv4_hnapt.smac_hi, ppe_parse_result->smac);
+			foe_set_mac_lo_info(entry->ipv4_hnapt.smac_lo, ppe_parse_result->smac);
+			entry->ipv4_hnapt.vlan1 = ntohs(ppe_parse_result->vlan1);
+#ifdef VPRI_REMARK_TEST
+			/* VPRI=0x7 */
+			entry->ipv4_hnapt.vlan1 |= (7 << 13);
+#endif
+			entry->ipv4_hnapt.pppoe_id = ntohs(ppe_parse_result->pppoe_sid);
+			entry->ipv4_hnapt.vlan2_winfo = ntohs(ppe_parse_result->vlan2);
+
+			entry->ipv4_hnapt.etype = ntohs(ppe_parse_result->vlan_tag);
+		}
+	} else {
+		if (fe_feature & HNAT_IPV6) {
+			foe_set_mac_hi_info(entry->ipv6_5t_route.dmac_hi, ppe_parse_result->dmac);
+			foe_set_mac_lo_info(entry->ipv6_5t_route.dmac_lo, ppe_parse_result->dmac);
+			foe_set_mac_hi_info(entry->ipv6_5t_route.smac_hi, ppe_parse_result->smac);
+			foe_set_mac_lo_info(entry->ipv6_5t_route.smac_lo, ppe_parse_result->smac);
+			entry->ipv6_5t_route.vlan1 = ntohs(ppe_parse_result->vlan1);
+			entry->ipv6_5t_route.pppoe_id = ntohs(ppe_parse_result->pppoe_sid);
+			entry->ipv6_5t_route.vlan2_winfo = ntohs(ppe_parse_result->vlan2);
+
+			entry->ipv6_5t_route.etype = ntohs(ppe_parse_result->vlan_tag);
+		} else {
+				return 1;
+		}
+	}
+
+/* VLAN Layer:*/
+/* 0: outgoing packet is untagged packet*/
+/* 1: outgoing packet is tagged packet*/
+/* 2: outgoing packet is double tagged packet*/
+/* 3: outgoing packet is triple tagged packet*/
+/* 4: outgoing packet is fourfold tagged packet*/
+	entry->bfib1.vlan_layer = ppe_parse_result->vlan_layer;
+
+#ifdef VLAN_LAYER_TEST
+	/* outgoing packet is triple tagged packet */
+	entry->bfib1.vlan_layer = 3;
+	entry->ipv4_hnapt.vlan1 = 2;
+	entry->ipv4_hnapt.vlan2 = 1;
+#endif
+	if (ppe_parse_result->pppoe_gap)
+		entry->bfib1.psn = 1;
+	else
+		entry->bfib1.psn = 0;
+
+	entry->ipv4_hnapt.bfib1.vpm = 1;	/* 0x8100 */
+	return 0;
+}
+
+
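+/*
+ * ppe_get_chkbase() - fold the ones' complements of the IPv4 checksum,
+ * total length and identification fields into a 16-bit "checksum base"
+ * (two folds are enough: three 16-bit addends carry at most 2, hence
+ * the 0x7 mask). The PPE presumably uses this base to patch the outer
+ * IPv4 header checksum incrementally when it rewrites tot_len/id during
+ * 6RD encapsulation.
+ */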
+static uint16_t ppe_get_chkbase(struct iphdr *iph)
+{
+	u16 org_chksum = ntohs(iph->check);
+	u16 org_tot_len = ntohs(iph->tot_len);
+	u16 org_id = ntohs(iph->id);
+	u16 chksum_tmp, tot_len_tmp, id_tmp;
+	u32 tmp = 0;
+	u16 chksum_base = 0;
+
+	chksum_tmp = ~(org_chksum);
+	tot_len_tmp = ~(org_tot_len);
+	id_tmp = ~(org_id);
+	tmp = chksum_tmp + tot_len_tmp + id_tmp;
+	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
+	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
+	chksum_base = tmp & 0xFFFF;
+
+	return chksum_base;
+}
+
+
+int32_t ppe_fill_L3_info_med(struct sk_buff *skb, struct foe_entry *entry,
+			 struct pkt_parse_result *ppe_parse_result)
+{
+	/* IPv4 or IPv4 over PPPoE */
+
+	if ((ppe_parse_result->pkt_type == IPV4_HNAT) ||
+	    (ppe_parse_result->pkt_type == IPV4_HNAPT)) {
+		if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
+		    entry->ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) { /* DS-Lite WAN->LAN */
+			if (fe_feature & HNAT_IPV6) {
+				if (fe_feature & PPE_MIB) {
+					entry->ipv4_dslite.iblk2.mibf = 1;
+				}
+				entry->ipv4_dslite.bfib1.rmt = 1;	/* remove outer IPv6 header */
+				entry->ipv4_dslite.iblk2.dscp = ppe_parse_result->iph.tos;
+			}
+
+		} else {
+			entry->ipv4_hnapt.new_sip = ntohl(ppe_parse_result->iph.saddr);
+			entry->ipv4_hnapt.new_dip = ntohl(ppe_parse_result->iph.daddr);
+			entry->ipv4_hnapt.iblk2.dscp = ppe_parse_result->iph.tos;
+			if (fe_feature & PPE_MIB)
+				entry->ipv4_hnapt.iblk2.mibf = 1;
+		}
+	}
+
+	if (ppe_parse_result->pkt_type == IPV6_6RD) {
+		/* fill in ipv4 6rd entry */
+		entry->ipv6_6rd.tunnel_sipv4 = ntohl(ppe_parse_result->iph.saddr);
+		entry->ipv6_6rd.tunnel_dipv4 = ntohl(ppe_parse_result->iph.daddr);
+		entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(&ppe_parse_result->iph);
+		entry->ipv6_6rd.flag = (ntohs(ppe_parse_result->iph.frag_off) >> 13);
+		entry->ipv6_6rd.ttl = ppe_parse_result->iph.ttl;
+		entry->ipv6_6rd.dscp = ppe_parse_result->iph.tos;
+		if (fe_feature & PPE_MIB)
+			entry->ipv6_6rd.iblk2.mibf = 1;
+
+		hwnat_set_6rd_id(entry, ppe_parse_result);
+		/* IPv4 DS-Lite and IPv6 6RD shall be turned on by SW during initialization */
+		entry->bfib1.pkt_type = IPV6_6RD;
+		entry->bfib1.rmt = 0;
+	}
+
+	/* IPv6 or IPv6 over PPPoE */
+	if (ppe_parse_result->pkt_type == IPV6_3T_ROUTE ||
+	    ppe_parse_result->pkt_type == IPV6_5T_ROUTE) {
+		/* incoming packet is 6RD and we need to remove the outer IPv4 header */
+		if (entry->bfib1.pkt_type == IPV6_6RD) {
+			entry->ipv6_3t_route.bfib1.rmt = 1;
+			entry->ipv6_3t_route.iblk2.dscp =
+				(ppe_parse_result->ip6h.priority << 4 |
+				 (ppe_parse_result->ip6h.flow_lbl[0] >> 4));
+			if (fe_feature & PPE_MIB)
+				entry->ipv6_3t_route.iblk2.mibf = 1;
+
+		} else {
+			/* fill in ipv6 routing entry */
+			entry->ipv6_3t_route.ipv6_sip0 =
+				ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[0]);
+			entry->ipv6_3t_route.ipv6_sip1 =
+				ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[1]);
+			entry->ipv6_3t_route.ipv6_sip2 =
+				ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[2]);
+			entry->ipv6_3t_route.ipv6_sip3 =
+				ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[3]);
+			entry->ipv6_3t_route.ipv6_dip0 =
+				ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[0]);
+			entry->ipv6_3t_route.ipv6_dip1 =
+				ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[1]);
+			entry->ipv6_3t_route.ipv6_dip2 =
+				ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[2]);
+			entry->ipv6_3t_route.ipv6_dip3 =
+				ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[3]);
+			entry->ipv6_3t_route.iblk2.dscp =
+				(ppe_parse_result->ip6h.priority << 4 |
+				 (ppe_parse_result->ip6h.flow_lbl[0] >> 4));
+			if (fe_feature & PPE_MIB)
+				entry->ipv6_3t_route.iblk2.mibf = 1;
+		}
+	} else if (ppe_parse_result->pkt_type == IPV4_DSLITE) {
+		/* fill in DSLite entry */
+		entry->ipv4_dslite.tunnel_sipv6_0 =
+		    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[0]);
+		entry->ipv4_dslite.tunnel_sipv6_1 =
+		    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[1]);
+		entry->ipv4_dslite.tunnel_sipv6_2 =
+		    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[2]);
+		entry->ipv4_dslite.tunnel_sipv6_3 =
+		    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[3]);
+
+		entry->ipv4_dslite.tunnel_dipv6_0 =
+		    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[0]);
+		entry->ipv4_dslite.tunnel_dipv6_1 =
+		    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[1]);
+		entry->ipv4_dslite.tunnel_dipv6_2 =
+		    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[2]);
+		entry->ipv4_dslite.tunnel_dipv6_3 =
+		    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[3]);
+		if (fe_feature & PPE_MIB)
+			entry->ipv4_dslite.iblk2.mibf = 1;
+
+		memcpy(entry->ipv4_dslite.flow_lbl, ppe_parse_result->ip6h.flow_lbl,
+		       sizeof(ppe_parse_result->ip6h.flow_lbl));
+		entry->ipv4_dslite.priority = ppe_parse_result->ip6h.priority;
+		entry->ipv4_dslite.hop_limit = ppe_parse_result->ip6h.hop_limit;
+		/* IPv4 DS-Lite and IPv6 6RD shall be turned on by SW during initialization */
+		entry->bfib1.pkt_type = IPV4_DSLITE;
+		entry->bfib1.rmt = 0;
+	}
+
+	return 0;
+}
+
+
+int32_t ppe_fill_L3_info(struct sk_buff *skb, struct foe_entry *entry,
+			 struct pkt_parse_result *ppe_parse_result)
+{
+	/* IPv4 or IPv4 over PPPoE */
+	if ((ppe_parse_result->eth_type == htons(ETH_P_IP)) ||
+	    (ppe_parse_result->eth_type == htons(ETH_P_PPP_SES) &&
+	     ppe_parse_result->ppp_tag == htons(PPP_IP))) {
+		if ((ppe_parse_result->pkt_type == IPV4_HNAT) ||
+		    (ppe_parse_result->pkt_type == IPV4_HNAPT)) {
+			if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
+			    entry->ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {/* DS-Lite WAN->LAN */
+				if (fe_feature & HNAT_IPV6) {
+					if (fe_feature & PPE_MIB) {
+						entry->ipv4_dslite.iblk2.mibf = 1;
+					}
+					entry->ipv4_dslite.bfib1.rmt = 1;	/* remove outer IPv6 header */
+					entry->ipv4_dslite.iblk2.dscp = ppe_parse_result->iph.tos;
+				}
+
+			} else {
+
+				entry->ipv4_hnapt.new_sip = ntohl(ppe_parse_result->iph.saddr);
+				entry->ipv4_hnapt.new_dip = ntohl(ppe_parse_result->iph.daddr);
+				entry->ipv4_hnapt.iblk2.dscp = ppe_parse_result->iph.tos;
+#ifdef DSCP_REMARK_TEST
+				entry->ipv4_hnapt.iblk2.dscp = 0xff;
+#endif
+				if (fe_feature & PPE_MIB)
+					entry->ipv4_hnapt.iblk2.mibf = 1;
+			}
+		}
+
+		if (ppe_parse_result->pkt_type == IPV6_6RD) {
+			/* fill in ipv4 6rd entry */
+			entry->ipv6_6rd.tunnel_sipv4 = ntohl(ppe_parse_result->iph.saddr);
+			entry->ipv6_6rd.tunnel_dipv4 = ntohl(ppe_parse_result->iph.daddr);
+			entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(&ppe_parse_result->iph);
+			entry->ipv6_6rd.flag = (ntohs(ppe_parse_result->iph.frag_off) >> 13);
+			entry->ipv6_6rd.ttl = ppe_parse_result->iph.ttl;
+			entry->ipv6_6rd.dscp = ppe_parse_result->iph.tos;
+			if (fe_feature & PPE_MIB) {
+				entry->ipv6_6rd.iblk2.mibf = 1;
+
+			}
+			hwnat_set_6rd_id(entry, ppe_parse_result);
+			/* IPv4 DS-Lite and IPv6 6RD shall be turn on by SW during initialization */
+			entry->bfib1.pkt_type = IPV6_6RD;
+			entry->bfib1.rmt = 0;
+
+		}
+	}
+
+	/* IPv6 or IPv6 over PPPoE */
+	if (ppe_parse_result->eth_type == htons(ETH_P_IPV6) ||
+	    (ppe_parse_result->eth_type == htons(ETH_P_PPP_SES) &&
+	     ppe_parse_result->ppp_tag == htons(PPP_IPV6))) {
+		if (ppe_parse_result->pkt_type == IPV6_3T_ROUTE ||
+		    ppe_parse_result->pkt_type == IPV6_5T_ROUTE) {
+			/* incoming packet is 6RD and need to remove outer IPv4 header */
+			if (entry->bfib1.pkt_type == IPV6_6RD) {
+				entry->ipv6_3t_route.bfib1.rmt = 1;
+				entry->ipv6_3t_route.iblk2.dscp =
+				    (ppe_parse_result->ip6h.
+				     priority << 4 | (ppe_parse_result->ip6h.flow_lbl[0] >> 4));
+				if (fe_feature & PPE_MIB)
+					entry->ipv6_3t_route.iblk2.mibf = 1;
+			} else {
+				/* fill in ipv6 routing entry */
+				entry->ipv6_3t_route.ipv6_sip0 =
+				    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[0]);
+				entry->ipv6_3t_route.ipv6_sip1 =
+				    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[1]);
+				entry->ipv6_3t_route.ipv6_sip2 =
+				    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[2]);
+				entry->ipv6_3t_route.ipv6_sip3 =
+				    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[3]);
+
+				entry->ipv6_3t_route.ipv6_dip0 =
+				    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[0]);
+				entry->ipv6_3t_route.ipv6_dip1 =
+				    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[1]);
+				entry->ipv6_3t_route.ipv6_dip2 =
+				    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[2]);
+				entry->ipv6_3t_route.ipv6_dip3 =
+				    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[3]);
+				entry->ipv6_3t_route.iblk2.dscp =
+				    (ppe_parse_result->ip6h.priority << 4 |
+				     (ppe_parse_result->ip6h.flow_lbl[0] >> 4));
+
+/*#ifdef DSCP_REMARK_TEST*/
+/*				entry->ipv6_3t_route.iblk2.dscp = 0xff;*/
+/*#endif*/
+
+				if (fe_feature & PPE_MIB)
+					entry->ipv6_3t_route.iblk2.mibf = 1;
+			}
+		} else if (ppe_parse_result->pkt_type == IPV4_DSLITE ||
+			   ppe_parse_result->pkt_type == IPV4_MAP_E) {
+			/* fill in DSLite entry */
+			entry->ipv4_dslite.tunnel_sipv6_0 =
+			    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[0]);
+			entry->ipv4_dslite.tunnel_sipv6_1 =
+			    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[1]);
+			entry->ipv4_dslite.tunnel_sipv6_2 =
+			    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[2]);
+			entry->ipv4_dslite.tunnel_sipv6_3 =
+			    ntohl(ppe_parse_result->ip6h.saddr.s6_addr32[3]);
+
+			entry->ipv4_dslite.tunnel_dipv6_0 =
+			    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[0]);
+			entry->ipv4_dslite.tunnel_dipv6_1 =
+			    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[1]);
+			entry->ipv4_dslite.tunnel_dipv6_2 =
+			    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[2]);
+			entry->ipv4_dslite.tunnel_dipv6_3 =
+			    ntohl(ppe_parse_result->ip6h.daddr.s6_addr32[3]);
+			if (fe_feature & PPE_MIB)
+				entry->ipv4_dslite.iblk2.mibf = 1;
+
+			memcpy(entry->ipv4_dslite.flow_lbl, ppe_parse_result->ip6h.flow_lbl,
+			       sizeof(ppe_parse_result->ip6h.flow_lbl));
+			entry->ipv4_dslite.priority = ppe_parse_result->ip6h.priority;
+			entry->ipv4_dslite.hop_limit = ppe_parse_result->ip6h.hop_limit;
+			if (SwitchDslMape == 1) {
+				entry->ipv4_dslite.new_sip = ntohl(ppe_parse_result->iph.saddr);
+				entry->ipv4_dslite.new_dip = ntohl(ppe_parse_result->iph.daddr);
+				entry->bfib1.pkt_type = IPV4_MAP_E;
+			} else {
+				/* IPv4 DS-Lite and IPv6 6RD shall be turned on by SW during initialization */
+				entry->bfib1.pkt_type = IPV4_DSLITE;
+				entry->bfib1.rmt = 0;
+			}
+		}
+	}
+	if ((!IS_IPV4_GRP(entry)) && (!(IS_IPV6_GRP(entry)))) {
+		NAT_PRINT("unknown Pkt_type=%d\n", entry->bfib1.pkt_type);
+		return 1;
+	}
+
+	return 0;
+}
+
+int32_t ppe_fill_L4_info(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	if (ppe_parse_result->pkt_type == IPV4_HNAPT) {
+		/* DS-LIte WAN->LAN */
+		if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE)
+			return 0;
+		if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
+		/* Set Layer4 Info - NEW_SPORT, NEW_DPORT */
+			if (ppe_parse_result->iph.protocol == IPPROTO_TCP) {
+				entry->ipv4_dslite.new_sport = ntohs(ppe_parse_result->th.source);
+				entry->ipv4_dslite.new_dport = ntohs(ppe_parse_result->th.dest);
+				entry->ipv4_dslite.bfib1.udp = TCP;
+			} else if (ppe_parse_result->iph.protocol == IPPROTO_UDP) {
+				entry->ipv4_dslite.new_sport = ntohs(ppe_parse_result->uh.source);
+				entry->ipv4_dslite.new_dport = ntohs(ppe_parse_result->uh.dest);
+				entry->ipv4_dslite.bfib1.udp = UDP;
+			}
+		}
+		/* Set Layer4 Info - NEW_SPORT, NEW_DPORT */
+		if (ppe_parse_result->iph.protocol == IPPROTO_TCP) {
+			entry->ipv4_hnapt.new_sport = ntohs(ppe_parse_result->th.source);
+			entry->ipv4_hnapt.new_dport = ntohs(ppe_parse_result->th.dest);
+			entry->ipv4_hnapt.bfib1.udp = TCP;
+		} else if (ppe_parse_result->iph.protocol == IPPROTO_UDP) {
+			entry->ipv4_hnapt.new_sport = ntohs(ppe_parse_result->uh.source);
+			entry->ipv4_hnapt.new_dport = ntohs(ppe_parse_result->uh.dest);
+			entry->ipv4_hnapt.bfib1.udp = UDP;
+		}
+	}
+
+	/*else if (ppe_parse_result.pkt_type == IPV4_HNAT)*/
+		/* do nothing */
+	/*else if (ppe_parse_result.pkt_type == IPV6_1T_ROUTE)*/
+		/* do nothing */
+	/*else if (ppe_parse_result.pkt_type == IPV6_3T_ROUTE)*/
+		/* do nothing */
+	/*else if (ppe_parse_result.pkt_type == IPV6_5T_ROUTE)*/
+		/* do nothing */
+	return 0;
+}
+
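+/*
+ * ppe_set_infoblk2() - fill the forwarding info block of an entry.
+ * HNAT_V2 encodes the PSE destination port in a 4-bit dp field, while
+ * HNAT_V1 splits it into a 3-bit dp plus the dp1 overflow bit (set when
+ * fpidx >= 8). Multicast flows are kept away from the WDMA/MDMA ports.
+ */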
+static void ppe_set_infoblk2(struct _info_blk2 *iblk2, uint32_t fpidx, uint32_t port_mg,
+			     u32 port_ag, struct pkt_parse_result *ppe_parse_result)
+{
+/* Replace 802.1Q priority by user priority */
+
+/*#ifdef FORCE_UP_TEST*/
+/*	u32 reg;*/
+/**/
+/*	iblk2->fp = 1;*/
+/*	iblk2->up = 7;*/
+/*	reg = reg_read(RALINK_ETH_SW_BASE + 0x2704);*/
+/*	reg |= (0x1 << 11);*/
+/*	reg_write(RALINK_ETH_SW_BASE + 0x2704, reg);*/
+/*#endif*/
+	/* we need to lookup another multicast table if this is multicast flow */
+	if (debug_level >= 6) {
+		pr_notice("%s, fpidx = %x\n", __func__, fpidx);
+	}
+
+
+#if defined(CONFIG_ODU_MCAST_SUPPORT)
+	/* ODU project specific flow: always treat as unicast */
+	if (debug_level >= 6)
+		pr_notice("CONFIG_ODU_MCAST_SUPPORT: force mcast = 0\n");
+	iblk2->mcast = 0;
+#else
+	if (ppe_parse_result->is_mcast) {
+		iblk2->mcast = 1;
+		if (fe_feature & WIFI_HNAT) {
+			if ((fpidx == WDMA0_PSE_PORT) || (fpidx == WDMA1_PSE_PORT) ||
+			    (fpidx == MDMA_PSE_PORT))
+				fpidx = 0;	/* multicast flow not go to WDMA*/
+		}
+	} else {
+		iblk2->mcast = 0;
+	}
+#endif
+
+#if defined(CONFIG_HNAT_V2)
+	iblk2->dp = fpidx & 0xf;
+#endif
+
+#if defined(CONFIG_HNAT_V1)
+	iblk2->dp = fpidx & 0x7;
+
+	if (fpidx >= 8)
+		iblk2->dp1 = 1;
+	else
+		iblk2->dp1 = 0;
+#endif
+	if (!(fe_feature & HNAT_QDMA))
+		iblk2->fqos = 0;	/* PDMA MODE should not goes to QoS */
+
+	iblk2->acnt = fpidx;
+	iblk2->pcpl = 0;
+}
+
+/*for 16 queue test*/
+unsigned char queue_number;
+
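+/*
+ * set_ppe_qid() - map the QoS queue id carried in skb->mark (0..63)
+ * into the entry. HNAT_V1 splits the 6-bit id: qid1 takes bits 5:4 and
+ * qid bits 3:0, so e.g. mark 37 (0b100101) becomes qid1=2, qid=5.
+ * Marks above 63 are reset to 0.
+ */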
+void set_ppe_qid(struct sk_buff *skb, struct foe_entry *entry)
+{
+	unsigned int qidx;
+
+	if (IS_IPV4_GRP(entry)) {
+		if (skb->mark > 63)
+			skb->mark = 0;
+		qidx = (skb->mark) & 0x3f;
+#if defined(CONFIG_HNAT_V2)
+		entry->ipv4_hnapt.iblk2.qid = qidx;
+#endif
+#if defined(CONFIG_HNAT_V1)
+		entry->ipv4_hnapt.iblk2.qid1 = ((qidx & 0x30) >> 4);
+		entry->ipv4_hnapt.iblk2.qid = (qidx & 0x0f);
+#endif
+	}
+
+	if (IS_IPV6_GRP(entry)) {
+		if (skb->mark > 63)
+			skb->mark = 0;
+		qidx = (skb->mark) & 0x3f;
+#if defined(CONFIG_HNAT_V2)
+		entry->ipv6_3t_route.iblk2.qid = qidx;
+#endif
+#if defined(CONFIG_HNAT_V1)
+		entry->ipv6_3t_route.iblk2.qid1 = ((qidx & 0x30) >> 4);
+		entry->ipv6_3t_route.iblk2.qid = (qidx & 0x0f);
+#endif
+	}
+}
+
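+/*
+ * set_warp_wifi_dp() - program the WED (warp) WiFi info: rx_id carries
+ * the 2-bit receive ring id, and winfo packs the station wcid into bits
+ * 15:6 and the BSS index into bits 5:0 (e.g. wcid=5, bssid=3 gives
+ * winfo=0x0143).
+ */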
+void set_warp_wifi_dp(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result, int gmac_no)
+{
+	if (debug_level >= 1) {
+		pr_notice("FP = %x, FOE_WDMA_ID = %x, FOE_WC_ID = %x, FOE_BSS_ID = %x\n", gmac_no, FOE_WDMA_ID(skb), FOE_WC_ID(skb), FOE_BSS_ID(skb));
+	}
+
+	if (IS_IPV4_GRP(entry)) {
+		entry->ipv4_hnapt.minfo = 0;
+		ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, gmac_no, NO_USE, WED_ACG, ppe_parse_result);
+		//entry->ipv4_hnapt.iblk2.rx_id = (FOE_WDMA_ID(skb) & 0x03);
+		entry->ipv4_hnapt.iblk2.rx_id = (FOE_RX_ID(skb) & 0x03);
+		entry->ipv4_hnapt.iblk2.winfo = 1;
+		entry->ipv4_hnapt.winfo =
+				((FOE_WC_ID(skb) & 0x3ff) << 6) |
+				(FOE_BSS_ID(skb) & 0x3f);
+	}
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_GRP(entry)) {
+			ppe_set_infoblk2(&entry->ipv6_3t_route.iblk2, gmac_no, NO_USE, WED_ACG, ppe_parse_result);
+			entry->ipv6_3t_route.minfo = 0;
+			entry->ipv6_3t_route.iblk2.rx_id = (FOE_RX_ID(skb) & 0x03);
+			entry->ipv6_3t_route.iblk2.winfo = 1;
+			entry->ipv6_3t_route.winfo =
+				((FOE_WC_ID(skb) & 0x3ff) << 6) |
+				(FOE_BSS_ID(skb) & 0x3f);
+		}
+	}
+}
+
+void pp_fill_qdma_entry(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	if (IS_IPV4_GRP(entry)) {
+		if (FOE_SP(skb) == 5)	/* wifi-to-wifi traffic does not go to PSE port 6 */
+			entry->ipv4_hnapt.iblk2.fqos = 0;
+		else
+			entry->ipv4_hnapt.iblk2.fqos = set_fqos;
+	}
+	if (IS_IPV6_GRP(entry)) {
+		if (FOE_SP(skb) == 5)	/* wifi-to-wifi traffic does not go to PSE port 6 */
+			entry->ipv6_3t_route.iblk2.fqos = 0;
+		else
+			entry->ipv6_3t_route.iblk2.fqos = set_fqos;
+	}
+}
+
+/*port means pse port*/
+void set_dst_port(struct foe_entry *entry, int port, int group, struct pkt_parse_result *ppe_parse_result)
+{
+	if (IS_IPV4_GRP(entry))
+		ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, port, NO_USE, group, ppe_parse_result);	/* 0=PDMA */
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_GRP(entry))
+			ppe_set_infoblk2(&entry->ipv6_3t_route.iblk2, port, NO_USE, group, ppe_parse_result);
+	}
+}
+
+void set_fast_path_info(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+		 struct pkt_parse_result *ppe_parse_result)
+{
+	u8 pse_port;
+
+	if (fe_feature & HNAT_QDMA) {
+		set_ppe_qid(skb, entry);
+		set_eth_fqos(skb, entry);
+	}
+	pse_port = PDMA_RX;
+
+	set_dst_port(entry, pse_port, 0x3f, ppe_parse_result);
+}
+
+void set_fast_path_info_ext(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+		 struct pkt_parse_result *ppe_parse_result)
+{
+	u8 pse_port;
+
+	if (fe_feature & HNAT_QDMA) {
+		set_ppe_qid(skb, entry);
+		set_eth_fqos(skb, entry);
+	}
+
+	pse_port = gmac_no;
+
+	set_dst_port(entry, pse_port, 0x3f, ppe_parse_result);
+}
+
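+/*
+ * set_rndis_info() - bind an RNDIS flow. With EDMA RX enabled, flows
+ * are spread over the EDMA0/EDMA1 PSE ports in round-robin fashion
+ * (depending on rndis_mod); all wifi- and modem-specific fields are
+ * cleared since they do not apply here.
+ */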
+void set_rndis_info(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+		 struct pkt_parse_result *ppe_parse_result)
+{
+	u8 pse_port = gmac_no;
+
+#ifdef CONFIG_EDMA_RX
+	/* spread RNDIS flows across the two EDMA PSE ports */
+	if (rndis_bind_count % rndis_mod == 0)
+		pse_port = EDMA0_PSE_PORT;
+	else if (rndis_bind_count % rndis_mod == 1)
+		pse_port = EDMA1_PSE_PORT;
+
+	rndis_bind_count++;
+#endif /* CONFIG_EDMA_RX */
+
+	pr_info("%s, FOE_AI(skb):0x%x, FOE_SP(skb):%d, pse_port:%d\n", __func__, FOE_AI(skb), FOE_SP(skb), pse_port);
+
+	if (fe_feature & HNAT_QDMA) {
+		set_ppe_qid(skb, entry);
+		set_eth_fqos(skb, entry);
+	}
+
+	set_dst_port(entry, pse_port, 0x3f, ppe_parse_result);
+
+	/* Set wifi, modem info as zero */
+	if (IS_IPV4_GRP(entry)) {
+		entry->ipv4_hnapt.iblk2.rx_id = 0;
+		entry->ipv4_hnapt.iblk2.winfo = 0;
+		entry->ipv4_hnapt.winfo = 0;
+		entry->ipv4_hnapt.minfo = 0;
+	}
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_GRP(entry)) {
+			entry->ipv6_3t_route.iblk2.rx_id = 0;
+			entry->ipv6_3t_route.iblk2.winfo = 0;
+			entry->ipv6_3t_route.winfo = 0;
+			entry->ipv6_3t_route.minfo = 0;
+		}
+	}
+}
+
+
+void set_wifi_info(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+		 struct pkt_parse_result *ppe_parse_result)
+{
+	u8 pse_port;
+	int sw_fast_path;
+
+	//sw_fast_path = check_hnat_type(skb);
+	if (gmac_no == 0)
+		sw_fast_path = 1;  /* driver fast path */
+	else
+		sw_fast_path = 0; /* hwnat */
+
+	if (fe_feature & HNAT_QDMA) {
+		set_ppe_qid(skb, entry);
+		set_eth_fqos(skb, entry);
+	}
+	pse_port = ADMA_PSE_PORT;
+
+	pr_info("%s, gmac_no:%d, pse_port:%d, sw_fast_path:%d\n", __func__, gmac_no, pse_port, sw_fast_path);
+
+	if (fe_feature & WARP_WHNAT) {
+		if (!sw_fast_path) {
+			set_warp_wifi_dp(skb, entry, ppe_parse_result, gmac_no);
+		} else {
+			pp_fill_qdma_entry(skb, entry, ppe_parse_result);
+			set_dst_port(entry, pse_port, 0x3f, ppe_parse_result);
+		}
+	} else {
+		pr_notice("Warp wifi hwnat not support==> fast path\n");
+		set_dst_port(entry, pse_port, 0x3f, ppe_parse_result);
+	}
+}
+
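+/*
+ * set_modem_info() - steer the flow to the MDMA PSE port. minfo packs
+ * the modem network type into bits 14:12 and the channel id into bits
+ * 11:4, with bit 15 set (presumably a "minfo valid" flag).
+ */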
+void set_modem_info(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+		 struct pkt_parse_result *ppe_parse_result)
+{
+	if (fe_feature & HNAT_QDMA) {
+		set_ppe_qid(skb, entry);
+		set_eth_fqos(skb, entry);
+	}
+
+	if (IS_IPV4_GRP(entry)) {
+		entry->ipv4_hnapt.iblk2.rx_id = 0;
+		entry->ipv4_hnapt.iblk2.winfo = 0;
+		entry->ipv4_hnapt.winfo = 0;
+		ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, MDMA_PSE_PORT, NO_USE, MED_ACG, ppe_parse_result);
+		entry->ipv4_hnapt.minfo = ((FOE_MINFO_NTYPE(skb) & 0x7)  << 12) |
+					      ((FOE_MINFO_CHID(skb) & 0xff) << 4) |
+					      (BIT(15));
+	}
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_GRP(entry)) {
+			ppe_set_infoblk2(&entry->ipv6_3t_route.iblk2, MDMA_PSE_PORT, NO_USE, MED_ACG, ppe_parse_result);
+			entry->ipv6_3t_route.iblk2.rx_id = 0;
+			entry->ipv6_3t_route.iblk2.winfo = 0;
+			entry->ipv6_3t_route.winfo = 0;
+			entry->ipv6_3t_route.minfo = ((FOE_MINFO_NTYPE(skb) & 0x7)  << 12) |
+						      ((FOE_MINFO_CHID(skb) & 0xff) << 4) |
+						      (BIT(15));
+		}
+	}
+}
+
+/* wan at p4 ==> wan_p4 = 1 */
+/* sp_tag enabled ==> sp_tag = 1 */
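+/* With the special tag, VID carries the switch port: for wan_p4=1,
+ * VIDs 1-4 are treated as LAN and VID 5 as WAN; for wan_p4=0, VID 1 is
+ * WAN and VIDs 2-5 are LAN.
+ */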
+int eth_sptag_lan_port_ipv4(struct foe_entry *entry, int wan_p4, struct pkt_parse_result *ppe_parse_result)
+{
+	if (wan_p4 == 1) {
+		if (((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 1) ||
+		    ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 2) ||
+		    ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 3) ||
+		    ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 4)) {
+			if ((bind_dir == DOWNSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH0_ACG, ppe_parse_result);
+			else
+				return 1;
+		}
+	} else {
+		if (((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 2) ||
+		    ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 3) ||
+		    ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 4) ||
+		    ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 5)) {
+			if ((bind_dir == DOWNSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH0_ACG, ppe_parse_result);
+			else
+				return 1;
+		}
+	}
+	return 0;
+}
+
+int eth_sptag_wan_port_ipv4(struct foe_entry *entry, int wan_p4, struct pkt_parse_result *ppe_parse_result)
+{
+	if (wan_p4 == 1) {
+		if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 5) {
+			if ((bind_dir == UPSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH1_ACG, ppe_parse_result);
+
+			else
+				return 1;
+		}
+	} else {
+		if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 1) {
+			if ((bind_dir == UPSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH1_ACG, ppe_parse_result);
+
+			else
+				return 1;
+		}
+	}
+	return 0;
+}
+
+int eth_sptag_lan_port_ipv6(struct foe_entry *entry, int wan_p4, struct pkt_parse_result *ppe_parse_result)
+{
+	if (wan_p4 == 1) {
+		if (((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 1) ||
+		    ((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 2) ||
+		    ((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 3) ||
+		    ((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 4)) {
+			if ((bind_dir == DOWNSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv6_5t_route.iblk2, 1, NO_USE, ETH0_ACG, ppe_parse_result);
+			else
+				return 1;
+		}
+	} else {
+		if (((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 2) ||
+		    ((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 3) ||
+		    ((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 4) ||
+		    ((entry->ipv6_5t_route.vlan1 & VLAN_VID_MASK) == 5)) {
+			if ((bind_dir == DOWNSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv6_5t_route.iblk2, 1, NO_USE, ETH0_ACG, ppe_parse_result);
+			else
+				return 1;
+		}
+	}
+	return 0;
+}
+
+int eth_sptag_wan_port_ipv6(struct foe_entry *entry, int wan_p4, struct pkt_parse_result *ppe_parse_result)
+{
+	if (wan_p4 == 1) {
+		if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 5) {
+			if ((bind_dir == UPSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv6_5t_route.iblk2, 1, NO_USE, ETH1_ACG, ppe_parse_result);
+			else
+				return 1;
+		}
+	} else {
+		if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == 1) {
+			if ((bind_dir == UPSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv6_5t_route.iblk2, 1, NO_USE, ETH1_ACG, ppe_parse_result);
+
+			else
+				return 1;
+		}
+	}
+	return 0;
+}
+
+int set_eth_dp_gmac1(struct foe_entry *entry, int gmac_no, struct pkt_parse_result *ppe_parse_result)
+{
+	/* only one GMAC */
+	if (IS_IPV4_GRP(entry)) {
+#ifdef CONFIG_RAETH_SPECIAL_TAG
+		if (fe_feature & HNAT_WAN_P4) {
+			/* sp tag enabled, wan at port4 */
+			eth_sptag_lan_port_ipv4(entry, 1, ppe_parse_result);
+			eth_sptag_wan_port_ipv4(entry, 1, ppe_parse_result);
+		} else {
+			eth_sptag_lan_port_ipv4(entry, 0, ppe_parse_result);
+			eth_sptag_wan_port_ipv4(entry, 0, ppe_parse_result);
+		} /* one-arm is not supported here */
+#else
+		if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == lan_vid) {
+			if ((bind_dir == DOWNSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH0_ACG,
+						 ppe_parse_result);
+			else
+				return 1;
+		} else if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == wan_vid) {
+			if ((bind_dir == UPSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH1_ACG,
+						 ppe_parse_result);
+			else
+				return 1;
+		} else {	/* one-arm */
+			ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH0_ACG,
+					 ppe_parse_result);
+		}
+#endif
+	}
+
+	if (IS_IPV6_GRP(entry)) {
+#ifdef CONFIG_RAETH_SPECIAL_TAG
+		if (fe_feature & HNAT_WAN_P4) { /* sp tag enabled, wan at port4 */
+			eth_sptag_lan_port_ipv6(entry, 1, ppe_parse_result);
+			eth_sptag_wan_port_ipv6(entry, 1, ppe_parse_result);
+		} else {
+			eth_sptag_lan_port_ipv6(entry, 0, ppe_parse_result);
+			eth_sptag_wan_port_ipv6(entry, 0, ppe_parse_result);
+		}
+#else
+		if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == lan_vid) {
+			if ((bind_dir == DOWNSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH0_ACG,
+						 ppe_parse_result);
+			else
+				return 1;
+		} else if ((entry->ipv4_hnapt.vlan1 & VLAN_VID_MASK) == wan_vid) {
+			if ((bind_dir == UPSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+				ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH1_ACG,
+						 ppe_parse_result);
+			else
+				return 1;
+		} else {	/* one-arm */
+			ppe_set_infoblk2(&entry->ipv4_hnapt.iblk2, 1, NO_USE, ETH0_ACG,
+					 ppe_parse_result);
+		}
+#endif
+	}
+
+	return 0;
+}
+
+int set_eth_dp_gmac2(struct foe_entry *entry, int gmac_no,
+		     struct pkt_parse_result *ppe_parse_result)
+{
+	/* RT3883/MT7621 with 2xGMAC - assuming GMAC2 = WAN and GMAC1 = LAN */
+	if (gmac_no == 1) {
+		if ((bind_dir == DOWNSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+			set_dst_port(entry, 1, 1, ppe_parse_result); /* PSE port 1, group 1 */
+		else
+			return 1;
+	} else if (gmac_no == 2) {
+		if ((bind_dir == UPSTREAM_ONLY) || (bind_dir == BIDIRECTION))
+			set_dst_port(entry, 2, 2, ppe_parse_result); /* PSE port 2, group 2 */
+		else
+			return 1;
+	}
+	return 0;
+}
+
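+/*
+ * Force-QoS flag: packets whose source PSE port is 5 never take the QoS
+ * path; everything else follows the global set_fqos knob when ETH_QOS is on.
+ */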
+void set_eth_fqos(struct sk_buff *skb, struct foe_entry *entry)
+{
+	if (IS_IPV4_GRP(entry)) {
+		if (FOE_SP(skb) == 5) {
+			entry->ipv4_hnapt.iblk2.fqos = 0;
+		} else {
+			if (fe_feature & ETH_QOS)
+				entry->ipv4_hnapt.iblk2.fqos = set_fqos;
+			else
+				entry->ipv4_hnapt.iblk2.fqos = 0;
+		}
+	}
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV6_GRP(entry)) {
+			if (FOE_SP(skb) == 5) {
+				entry->ipv6_5t_route.iblk2.fqos = 0;
+			} else {
+				if (fe_feature & ETH_QOS)
+					entry->ipv6_5t_route.iblk2.fqos = set_fqos;
+				else
+					entry->ipv6_5t_route.iblk2.fqos = 0;
+			}
+		}
+	}
+}
+
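+/*
+ * Record the index of skb->dev within dst_port[] in the FOE entry's act_dp
+ * field so the force-to-CPU path can later restore the egress interface.
+ * Returns 1 when the device is not registered or the packet type is unknown.
+ */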
+uint32_t ppe_set_ext_if_num(struct sk_buff *skb, struct foe_entry *entry)
+{
+	u32 offset = 0;
+	u32 i = 0;
+	int dev_match = 0;
+
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == skb->dev) {
+			offset = i;
+			dev_match = 1;
+			break;
+		}
+	}
+
+#ifdef CONFIG_RAETH_EDMA
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		/* dev->name is an embedded array, so a NULL-name test can never
+		 * fire; assume the intent was to stop at an empty slot
+		 */
+		if (!dst_port[i]) {
+			pr_err("dst_port[%d] is NULL\n", i);
+			break;
+		}
+
+		if ((strcmp(dst_port[i]->name, DEV_NAME) == 0 && strcmp(skb->dev->name, AQR_DEV_NAME) == 0) ||
+		    (strcmp(dst_port[i]->name, DEV2_NAME) == 0 && strcmp(skb->dev->name, AQR_DEV2_NAME) == 0)) {
+			offset = i;
+			dev_match = 1;
+			if (debug_level >= 7)
+				pr_notice("[HS-ethernet/HWNAT/TX] %s : dev_match Interface=%s, vir_if_idx=%x\n", __func__, skb->dev->name, offset);
+			break;
+		}
+	}
+#endif
+
+	if (dev_match == 0) {
+		if (debug_level >= 1)
+			pr_notice("%s Unknown interface, offset=%x\n", __func__, i);
+		return 1;
+	}
+
+	if (IS_IPV4_HNAT(entry) || IS_IPV4_HNAPT(entry)) {
+		entry->ipv4_hnapt.act_dp = offset;
+		return 0;
+	}
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV4_DSLITE(entry))
+			entry->ipv4_dslite.act_dp = offset;
+		else if (IS_IPV6_3T_ROUTE(entry))
+			entry->ipv6_3t_route.act_dp = offset;
+		else if (IS_IPV6_5T_ROUTE(entry))
+			entry->ipv6_5t_route.act_dp = offset;
+		else if (IS_IPV6_6RD(entry))
+			entry->ipv6_6rd.act_dp = offset;
+		else {
+			if (debug_level >= 1)
+				pr_notice("%s Unknown packet type\n", __func__);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
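+/*
+ * Pre-bind sanity checks: reject entries with a zero source IP, entries not
+ * in UNBIND state, unresolved ports, and modem-to-modem flows (which come
+ * from skb reuse). Returns non-zero when binding must be skipped.
+ */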
+int ppe_forbit_bind(struct sk_buff *skb, struct foe_entry *entry)
+{
+	int act_dp = get_act_dp(entry);
+	int rxif_idx = FOE_IF_IDX(skb);
+
+	if (entry->ipv4_hnapt.sip == 0) {
+		if (debug_level >= 3)
+			pr_notice("%s(), sip is 0\n", __func__);
+		return 1;
+	}
+
+	if (entry->bfib1.state != UNBIND) {
+		if (debug_level >= 3)
+			pr_notice("%s(), state is %d\n", __func__, entry->bfib1.state);
+		return 1;
+	}
+
+	/* empty net device */
+	if (rxif_idx == 0 || act_dp == 0) {
+		if (debug_level >= 3)
+			pr_notice("%s(), invalid port: %d,%d\n", __func__, rxif_idx, act_dp);
+		return 1;
+	}
+
+	/* modem in and modem out: caused by skb reuse */
+	if (rxif_idx >= DP_CCMNI0 && act_dp >= DP_CCMNI0) {
+		if (debug_level >= 3)
+			pr_notice("%s(), both are modem ports: %d,%d\n", __func__, rxif_idx, act_dp);
+		return 1;
+	}
+
+	return 0;
+}
+
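+/*
+ * Move an unbound FOE entry into BIND state: stamp the current PPE time,
+ * program TTL/Hop-Limit regeneration, enable the entry cache, then either
+ * mark it pre-bound (CONFIG_RA_HW_NAT_PREBIND) or bind it immediately and
+ * start the per-entry MIB timer.
+ */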
+void ppe_set_entry_bind(struct sk_buff *skb, struct foe_entry *entry)
+{
+	u32 current_time;
+	u32 act_dp;
+	int forbit;
+
+	forbit = ppe_forbit_bind(skb, entry);
+	if (forbit)
+		return;
+
+	/* Set Current time to time_stamp field in information block 1 */
+	current_time = reg_read(FOE_TS) & 0x3FFF;
+	entry->bfib1.time_stamp = (uint16_t)current_time;
+
+	/* IPv4: TTL / IPv6: Hop Limit field */
+	entry->ipv4_hnapt.bfib1.ttl = DFL_FOE_TTL_REGEN;
+
+	/* enable cache by default */
+	entry->ipv4_hnapt.bfib1.cah = 1;
+
+	hwnat_set_packet_sampling(entry);
+
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	entry->udib1.preb = 1;
+#else
+	if (debug_level >= 1) {
+		act_dp = get_act_dp(entry);
+		pr_notice("%s, FOE_IF_IDX = %d(%s), act_dp = %d(%s)\n",
+			__func__,
+			FOE_IF_IDX(skb), dst_port[FOE_IF_IDX(skb)]->name,
+			act_dp, dst_port[act_dp]->name);
+	}
+
+	set_rxif_idx(entry, FOE_IF_IDX(skb));
+
+	/* Change Foe Entry State to Binding State */
+	entry->bfib1.state = BIND;
+
+	ppe_start_mib_timer(skb, entry);
+
+	/* Dump Binding Entry */
+	if (debug_level >= 1)
+		foe_dump_entry(FOE_ENTRY_NUM(skb), entry);
+#endif
+}
+
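+/*
+ * Register a net device in the dst_port[] table: ccmni0..ccmni7 are pinned
+ * to slots 8..15, everything else takes the first free slot from index 1 up.
+ */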
+void ppe_dev_reg_handler(struct net_device *dev)
+{
+	int i;
+
+	if (dev == NULL) {
+		pr_notice("%s,interface not present\n", __func__);
+		return;
+	}
+
+	/* ccmni0..ccmni7 are pinned to dst_port slots 8..15 (loop form of the
+	 * original per-name blocks; only ccmni0 records DP_CCMNI0)
+	 */
+	for (i = 0; i < 8; i++) {
+		char ccmni_name[IFNAMSIZ];
+
+		snprintf(ccmni_name, sizeof(ccmni_name), "ccmni%d", i);
+		if (strncmp(dev->name, ccmni_name, 6) == 0 && !dst_port[8 + i]) {
+			dst_port[8 + i] = dev;
+			dst_port_type[8 + i] = 0;
+			if (i == 0)
+				DP_CCMNI0 = 8;
+			return;
+		}
+	}
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			pr_notice("%s : %s dst_port table has been registered (%d)\n", __func__, dev->name, i);
+			return;
+		}
+		if (!dst_port[i]) {
+			dst_port[i] = dev;
+			dst_port_type[i] = 0;
+			break;
+		}
+	}
+	if (i < MAX_IF_NUM)
+		pr_notice("%s : interface %s registered (%d), accel. type(%d)\n", __func__, dev->name, i, dst_port_type[i]);
+}
+
+void ppe_dev_unreg_handler(struct net_device *dev)
+{
+	int i;
+
+	if (dev == NULL)
+		return;
+	/* ccmni0..ccmni7 live in dst_port slots 8..15. The original blocks
+	 * tested !dst_port[n] before clearing, which can never match a
+	 * registered device; assume the intent was to clear the slot that
+	 * holds this device.
+	 */
+	for (i = 0; i < 8; i++) {
+		char ccmni_name[IFNAMSIZ];
+
+		snprintf(ccmni_name, sizeof(ccmni_name), "ccmni%d", i);
+		if (strncmp(dev->name, ccmni_name, 6) == 0 && dst_port[8 + i] == dev) {
+			dst_port[8 + i] = NULL;
+			ppe_reset_dev_mib(dev);
+			return;
+		}
+	}
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			dst_port[i] = NULL;
+			ppe_reset_dev_mib(dev);
+			break;
+		}
+	}
+	if (i < MAX_IF_NUM)
+		pr_notice("%s : interface %s set to NULL (%d)\n", __func__, dev->name, i);
+}
+
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+int get_done_bit(struct sk_buff *skb, struct foe_entry *entry)
+{
+	int done_bit;
+
+	done_bit = 0;
+
+	if (IS_IPV4_HNAT(entry) || IS_IPV4_HNAPT(entry)) {
+		done_bit = entry->ipv4_hnapt.resv1;
+		return done_bit;
+	}
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV4_DSLITE(entry)) {
+			done_bit = entry->ipv4_dslite.resv1;
+		} else if (IS_IPV6_3T_ROUTE(entry)) {
+			done_bit = entry->ipv6_3t_route.resv1;
+		} else if (IS_IPV6_5T_ROUTE(entry)) {
+			done_bit = entry->ipv6_5t_route.resv1;
+		} else if (IS_IPV6_6RD(entry)) {
+			done_bit = entry->ipv6_6rd.resv1;
+		} else {
+			pr_notice("unknown packet format\n");
+			return 0;
+		}
+	}
+
+	if ((done_bit != 0) && (done_bit != 1)) {
+		pr_notice("unexpected done_bit value: %d\n", done_bit);
+		done_bit = 0;
+	}
+	/* pr_notice("index = %d, done_bit=%d\n", FOE_ENTRY_NUM(skb), done_bit); */
+	return done_bit;
+}
+
+void set_ppe_table_done(struct foe_entry *entry)
+{
+	if (IS_IPV4_HNAT(entry) || IS_IPV4_HNAPT(entry)) {
+		entry->ipv4_hnapt.resv1 = 1;
+		return;
+	}
+
+	if (fe_feature & HNAT_IPV6) {
+		if (IS_IPV4_DSLITE(entry))
+			entry->ipv4_dslite.resv1 = 1;
+		else if (IS_IPV6_3T_ROUTE(entry))
+			entry->ipv6_3t_route.resv1 = 1;
+		else if (IS_IPV6_5T_ROUTE(entry))
+			entry->ipv6_5t_route.resv1 = 1;
+		else if (IS_IPV6_6RD(entry))
+			entry->ipv6_6rd.resv1 = 1;
+		else
+			pr_notice("unknown packet format\n");
+	}
+}
+#endif
+
+int get_skb_interface(struct sk_buff *skb)
+{
+	if ((strncmp(skb->dev->name, "rai", 3) == 0) ||
+	    (strncmp(skb->dev->name, "apclii", 6) == 0) ||
+	    (strncmp(skb->dev->name, "wdsi", 4) == 0) ||
+	    (strncmp(skb->dev->name, "wlan", 4) == 0))
+		return 1;
+	else
+		return 0;
+}
+
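+/*
+ * Enable or disable the PPE flow set: IPv4 NAT/NAPT (including fragments),
+ * IPv6 3/5-tuple and 6RD routing, 464XLAT, and DS-Lite or MAP-E depending on
+ * the HNAT generation. The same value is mirrored onto PPE1.
+ */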
+void ppe_setfoe_ebl(uint32_t foe_ebl)
+{
+	u32 ppe_flow_set = 0;
+
+	ppe_flow_set = reg_read(PPE_FLOW_SET);
+
+	/* The FOE engine needs to handle unicast/multicast/broadcast flows */
+	if (foe_ebl == 1) {
+		ppe_flow_set |= (BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN);
+		ppe_flow_set |= (BIT_IPV4_NAT_FRAG_EN | BIT_UDP_IP4F_NAT_EN);	/* IP fragment */
+		ppe_flow_set |= (BIT_IPV4_HASH_GREK);
+		ppe_flow_set |= BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN;
+		/* ppe_flow_set |= (BIT_IPV6_HASH_FLAB); // flow label */
+		ppe_flow_set |= (BIT_IPV6_HASH_GREK);
+		ppe_flow_set |= (BIT_IPV4_464XLAT_EN);
+#if defined(CONFIG_HNAT_V2)
+		ppe_flow_set |= (BIT_IPV4_MAPE_EN);
+#else
+		ppe_flow_set |= (BIT_IPV4_DSL_EN);
+#endif
+	} else {
+		ppe_flow_set &= ~(BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN);
+		ppe_flow_set &= ~(BIT_IPV4_NAT_FRAG_EN);
+
+		ppe_flow_set &= ~(BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN |
+				  BIT_IPV6_5T_ROUTE_EN);
+		/* mirror the enable path: MAP-E on HNAT v2, DS-Lite otherwise
+		 * (the original tested CONFIG_HNAT_V1 here, which left the
+		 * enable/disable paths asymmetric - assumed unintentional)
+		 */
+#if defined(CONFIG_HNAT_V2)
+		ppe_flow_set &= ~(BIT_IPV4_MAPE_EN);
+#else
+		ppe_flow_set &= ~(BIT_IPV4_DSL_EN);
+#endif
+		/* ppe_flow_set &= ~(BIT_IPV6_HASH_FLAB); */
+		ppe_flow_set &= ~(BIT_IPV6_HASH_GREK);
+		ppe_flow_set &= ~(BIT_IPV4_464XLAT_EN);
+	}
+
+	SwitchDslMape = !!(ppe_flow_set & BIT_IPV4_MAPE_EN);
+
+	reg_write(PPE_FLOW_SET, ppe_flow_set);
+	reg_write(PPE1_FLOW_SET, ppe_flow_set);
+}
+
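+/*
+ * Allocate the FOE table and program its size, hash mode and seed, then set
+ * the search-miss action to "forward to CPU and build a new entry".
+ */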
+int ppe_setfoe_hash_mode(u32 hash_mode, struct device *dev)
+{
+	/* Allocate FOE table base */
+	if (!foe_alloc_tbl(FOE_4TB_SIZ, dev))
+		return 0;
+
+	switch (FOE_4TB_SIZ) {
+	case 1024:
+		reg_modify_bits(PPE_FOE_CFG, FOE_TBL_SIZE_1K, 0, 3);
+		break;
+	case 2048:
+		reg_modify_bits(PPE_FOE_CFG, FOE_TBL_SIZE_2K, 0, 3);
+		break;
+	case 4096:
+		reg_modify_bits(PPE_FOE_CFG, FOE_TBL_SIZE_4K, 0, 3);
+		break;
+	case 8192:
+		reg_modify_bits(PPE_FOE_CFG, FOE_TBL_SIZE_8K, 0, 3);
+		break;
+	case 16384:
+		reg_modify_bits(PPE_FOE_CFG, FOE_TBL_SIZE_16K, 0, 3);
+		break;
+	case 32768:
+		reg_modify_bits(PPE_FOE_CFG, FOE_TBL_SIZE_32K, 0, 3);
+		break;
+	}
+
+	/* Set Hash Mode */
+	reg_modify_bits(PPE_FOE_CFG, hash_mode, 14, 2);
+	reg_write(PPE_HASH_SEED, HASH_SEED);
+
+	reg_modify_bits(PPE_FOE_CFG, 0, 18, 2);	/* disable */
+
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	reg_modify_bits(PPE_FOE_CFG, 1, 6, 1);	/* pre-bind age enable */
+#endif
+	/* Set action for FOE search miss */
+	reg_modify_bits(PPE_FOE_CFG, FWD_CPU_BUILD_ENTRY, 4, 2);
+
+	return 1;
+}
+
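+/*
+ * Configure FOE aging: unbind-state aging is always on; in manual mode only
+ * TCP FIN aging is enabled (to work around the TCP last-ACK issue), while
+ * auto mode also ages bound TCP/UDP/non-TCP-UDP entries.
+ */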
+static void ppe_setage_out(void)
+{
+	/* set Bind Non-TCP/UDP Age Enable */
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_NTU_AGE, 7, 1);
+
+	/* set Unbind State Age Enable */
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_UNB_AGE, 8, 1);
+
+	/* set min threshold of packet count for aging out at unbind state */
+	reg_modify_bits(PPE_FOE_UNB_AGE, DFL_FOE_UNB_MNP, 16, 16);
+
+	/* set Delta time for aging out an unbind FOE entry */
+	reg_modify_bits(PPE_FOE_UNB_AGE, DFL_FOE_UNB_DLTA, 0, 8);
+
+#ifndef CONFIG_HW_NAT_MANUAL_MODE
+	/* set Bind TCP Age Enable */
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_TCP_AGE, 9, 1);
+
+	/* set Bind UDP Age Enable */
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_UDP_AGE, 10, 1);
+
+	/* set Bind TCP FIN Age Enable */
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_FIN_AGE, 11, 1);
+
+	/* set Delta time for aging out a bound UDP FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE0, DFL_FOE_UDP_DLTA, 0, 16);
+
+	/* set Delta time for aging out a bound Non-TCP/UDP FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE0, DFL_FOE_NTU_DLTA, 16, 16);
+
+	/* set Delta time for aging out a bound TCP FIN FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE1, DFL_FOE_FIN_DLTA, 16, 16);
+
+	/* set Delta time for aging out a bound TCP FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE1, DFL_FOE_TCP_DLTA, 0, 16);
+#else
+	/* fix TCP last ACK issue:
+	 * only enable the Bind TCP FIN aging-out function
+	 */
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_FIN_AGE, 11, 1);
+	/* set Delta time for aging out a bound TCP FIN FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE1, DFL_FOE_FIN_DLTA, 16, 16);
+#endif
+}
+
+static void ppe_setfoe_ka(void)
+{
+	/* set Keep alive packet with new/org header */
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_KA, 12, 2);
+
+	/* Keep alive timer value */
+	reg_modify_bits(PPE_FOE_KA, DFL_FOE_KA_T, 0, 16);
+
+	/* Keep alive time for bind FOE TCP entry */
+	reg_modify_bits(PPE_FOE_KA, DFL_FOE_TCP_KA, 16, 8);
+
+	/* Keep alive timer for bind FOE UDP entry */
+	reg_modify_bits(PPE_FOE_KA, DFL_FOE_UDP_KA, 24, 8);
+
+	/* Keep alive timer for bind Non-TCP/UDP entry */
+	reg_modify_bits(PPE_BIND_LMT_1, DFL_FOE_NTU_KA, 16, 8);
+
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	reg_modify_bits(PPE_BIND_LMT_1, DFL_PBND_RD_LMT, 24, 8);
+#endif
+}
+
+static void ppe_setfoe_bind_rate(uint32_t foe_bind_rate)
+{
+	/* Allowed max entries to be built during one time-stamp unit */
+
+	/* smaller than 1/4 of total entries */
+	reg_modify_bits(PPE_FOE_LMT1, DFL_FOE_QURT_LMT, 0, 14);
+
+	/* between 1/2 and 1/4 of total entries */
+	reg_modify_bits(PPE_FOE_LMT1, DFL_FOE_HALF_LMT, 16, 14);
+
+	/* between full and 1/2 of total entries */
+	reg_modify_bits(PPE_FOE_LMT2, DFL_FOE_FULL_LMT, 0, 15);
+
+	/* Set reach bind rate for unbind state */
+	reg_modify_bits(PPE_FOE_BNDR, foe_bind_rate, 0, 16);
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	reg_modify_bits(PPE_FOE_BNDR, DFL_PBND_RD_PRD, 16, 16);
+#endif
+}
+
+static void ppe_setfoe_glocfg_ebl(uint32_t ebl)
+{
+	if (ebl == 1) {
+		/* PPE Engine Enable */
+		reg_modify_bits(PPE_GLO_CFG, 1, 0, 1);
+
+		if (fe_feature & HNAT_IPV6) {
+			/* TSID Enable */
+			pr_notice("TSID Enable\n");
+			reg_modify_bits(PPE_GLO_CFG, 1, 1, 1);
+		}
+
+		if (fe_feature & HNAT_MCAST) {
+			/* Enable multicast table lookup */
+			reg_modify_bits(PPE_GLO_CFG, 1, 7, 1);
+			reg_modify_bits(PPE_GLO_CFG, 0, 12, 2);	/* Decide by PPE entry hash index */
+			reg_modify_bits(PPE_MCAST_PPSE, 0, 0, 4);	/* multicast port0 map to PDMA */
+			reg_modify_bits(PPE_MCAST_PPSE, 1, 4, 4);	/* multicast port1 map to GMAC1 */
+			reg_modify_bits(PPE_MCAST_PPSE, 2, 8, 4);	/* multicast port2 map to GMAC2 */
+			reg_modify_bits(PPE_MCAST_PPSE, 5, 12, 4);	/* multicast port3 map to QDMA */
+		}
+
+		reg_write(PPE_DFT_CPORT, 0);	/* default CPU port is port0 (PDMA) */
+
+		/* WDMA, MDMA source port = drop port */
+		reg_write(PPE_DFT_CPORT1, 0xcb777);
+		reg_write(PPE_SBW_CTRL, 0x7f);
+
+		/* 6rd setting */
+		reg_modify_bits(PPE_GLO_CFG, 1, 20, 1);
+
+		/* reg_write(PS_CFG, 1); // Enable PacketSampling */
+		if (fe_feature & PPE_MIB) {
+			reg_write(MIB_CFG, 0x03);	/* Enable MIB & read clear */
+			reg_write(MIB_CAH_CTRL, 0x01);	/* enable MIB cache */
+		}
+
+		/* PPE packets with TTL=0 alert the CPU */
+		reg_modify_bits(PPE_GLO_CFG, DFL_TTL0_DRP, 4, 1);
+	} else {
+		/* PPE Engine Disable */
+		reg_modify_bits(PPE_GLO_CFG, 0, 0, 1);
+		if (fe_feature & PPE_MIB)
+			reg_write(MIB_CFG, 0x00);	/* Disable MIB */
+	}
+}
+
+int ppe1_setfoe_hash_mode(u32 hash_mode, struct device *dev)
+{
+	/* Allocate FOE table base */
+	if (!foe_alloc_tbl_ppe1(FOE_4TB_SIZ, dev))
+		return 0;
+
+	switch (FOE_4TB_SIZ) {
+	case 1024:
+		reg_modify_bits(PPE1_FOE_CFG, FOE_TBL_SIZE_1K, 0, 3);
+		break;
+	case 2048:
+		reg_modify_bits(PPE1_FOE_CFG, FOE_TBL_SIZE_2K, 0, 3);
+		break;
+	case 4096:
+		reg_modify_bits(PPE1_FOE_CFG, FOE_TBL_SIZE_4K, 0, 3);
+		break;
+	case 8192:
+		reg_modify_bits(PPE1_FOE_CFG, FOE_TBL_SIZE_8K, 0, 3);
+		break;
+	case 16384:
+		reg_modify_bits(PPE1_FOE_CFG, FOE_TBL_SIZE_16K, 0, 3);
+		break;
+	case 32768:
+		reg_modify_bits(PPE1_FOE_CFG, FOE_TBL_SIZE_32K, 0, 3);
+		break;
+	}
+
+	/* Set Hash Mode */
+	reg_modify_bits(PPE1_FOE_CFG, hash_mode, 14, 2);
+	reg_write(PPE1_HASH_SEED, HASH_SEED);
+
+	reg_modify_bits(PPE1_FOE_CFG, 0, 18, 2);	/* disable */
+
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	reg_modify_bits(PPE1_FOE_CFG, 1, 6, 1);	/* pre-bind age enable */
+#endif
+	/* Set action for FOE search miss */
+	reg_modify_bits(PPE1_FOE_CFG, FWD_CPU_BUILD_ENTRY, 4, 2);
+
+	return 1;
+}
+
+static void ppe1_setage_out(void)
+{
+	/* set Bind Non-TCP/UDP Age Enable */
+	reg_modify_bits(PPE1_FOE_CFG, DFL_FOE_NTU_AGE, 7, 1);
+
+	/* set Unbind State Age Enable */
+	reg_modify_bits(PPE1_FOE_CFG, DFL_FOE_UNB_AGE, 8, 1);
+
+	/* set min threshold of packet count for aging out at unbind state */
+	reg_modify_bits(PPE1_FOE_UNB_AGE, DFL_FOE_UNB_MNP, 16, 16);
+
+	/* set Delta time for aging out an unbind FOE entry */
+	reg_modify_bits(PPE1_FOE_UNB_AGE, DFL_FOE_UNB_DLTA, 0, 8);
+
+	/* set Bind TCP Age Enable */
+	reg_modify_bits(PPE1_FOE_CFG, DFL_FOE_TCP_AGE, 9, 1);
+
+	/* set Bind UDP Age Enable */
+	reg_modify_bits(PPE1_FOE_CFG, DFL_FOE_UDP_AGE, 10, 1);
+
+	/* set Bind TCP FIN Age Enable */
+	reg_modify_bits(PPE1_FOE_CFG, DFL_FOE_FIN_AGE, 11, 1);
+
+	/* set Delta time for aging out an bind UDP FOE entry */
+	reg_modify_bits(PPE1_FOE_BND_AGE0, DFL_FOE_UDP_DLTA, 0, 16);
+
+	/* set Delta time for aging out an bind Non-TCP/UDP FOE entry */
+	reg_modify_bits(PPE1_FOE_BND_AGE0, DFL_FOE_NTU_DLTA, 16, 16);
+
+	/* set Delta time for aging out an bind TCP FIN FOE entry */
+	reg_modify_bits(PPE1_FOE_BND_AGE1, DFL_FOE_FIN_DLTA, 16, 16);
+
+	/* set Delta time for aging out an bind TCP FOE entry */
+	reg_modify_bits(PPE1_FOE_BND_AGE1, DFL_FOE_TCP_DLTA, 0, 16);
+}
+
+static void ppe1_setfoe_ka(void)
+{
+	/* set Keep alive packet with new/org header */
+	reg_modify_bits(PPE1_FOE_CFG, DFL_FOE_KA, 12, 2);
+
+	/* Keep alive timer value */
+	reg_modify_bits(PPE1_FOE_KA, DFL_FOE_KA_T, 0, 16);
+
+	/* Keep alive time for bind FOE TCP entry */
+	reg_modify_bits(PPE1_FOE_KA, DFL_FOE_TCP_KA, 16, 8);
+
+	/* Keep alive timer for bind FOE UDP entry */
+	reg_modify_bits(PPE1_FOE_KA, DFL_FOE_UDP_KA, 24, 8);
+
+	/* Keep alive timer for bind Non-TCP/UDP entry */
+	reg_modify_bits(PPE1_BIND_LMT_1, DFL_FOE_NTU_KA, 16, 8);
+
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	reg_modify_bits(PPE1_BIND_LMT_1, DFL_PBND_RD_LMT, 24, 8);
+#endif
+}
+
+static void ppe1_setfoe_bind_rate(uint32_t foe_bind_rate)
+{
+	/* Allowed max entries to be built during one time-stamp unit */
+
+	/* smaller than 1/4 of total entries */
+	reg_modify_bits(PPE1_FOE_LMT1, DFL_FOE_QURT_LMT, 0, 14);
+
+	/* between 1/2 and 1/4 of total entries */
+	reg_modify_bits(PPE1_FOE_LMT1, DFL_FOE_HALF_LMT, 16, 14);
+
+	/* between full and 1/2 of total entries */
+	reg_modify_bits(PPE1_FOE_LMT2, DFL_FOE_FULL_LMT, 0, 15);
+
+	/* Set reach bind rate for unbind state */
+	reg_modify_bits(PPE1_FOE_BNDR, foe_bind_rate, 0, 16);
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	reg_modify_bits(PPE1_FOE_BNDR, DFL_PBND_RD_PRD, 16, 16);
+#endif
+}
+
+static void ppe1_setfoe_glocfg_ebl(uint32_t ebl)
+{
+	if (ebl == 1) {
+		/* PPE Engine Enable */
+		reg_modify_bits(PPE1_GLO_CFG, 1, 0, 1);
+
+		if (fe_feature & HNAT_IPV6) {
+			/* TSID Enable */
+			pr_notice("TSID Enable\n");
+			reg_modify_bits(PPE1_GLO_CFG, 1, 1, 1);
+		}
+
+		if (fe_feature & HNAT_MCAST) {
+			/* Enable multicast table lookup */
+			reg_modify_bits(PPE1_GLO_CFG, 1, 7, 1);
+			reg_modify_bits(PPE1_GLO_CFG, 0, 12, 2);	/* Decide by PPE entry hash index */
+			reg_modify_bits(PPE1_MCAST_PPSE, 0, 0, 4);	/* multicast port0 map to PDMA */
+			reg_modify_bits(PPE1_MCAST_PPSE, 1, 4, 4);	/* multicast port1 map to GMAC1 */
+			reg_modify_bits(PPE1_MCAST_PPSE, 2, 8, 4);	/* multicast port2 map to GMAC2 */
+			reg_modify_bits(PPE1_MCAST_PPSE, 5, 12, 4);	/* multicast port3 map to QDMA */
+		}
+
+		reg_write(PPE1_DFT_CPORT, 0);	/* default CPU port is port0 (PDMA) */
+
+		/* WDMA, MDMA source port = drop port */
+		reg_write(PPE1_DFT_CPORT1, 0xcb777);
+		reg_write(PPE1_SBW_CTRL, 0x7f);
+
+		/* 6rd setting */
+		reg_modify_bits(PPE1_GLO_CFG, 1, 20, 1);
+
+		if (fe_feature & PPE_MIB) {
+			reg_write(MIB_CFG_PPE1, 0x03);	/* Enable MIB & read clear */
+			reg_write(MIB_CAH_CTRL_PPE1, 0x01);	/* enable MIB cache */
+		}
+
+		/* PPE packets with TTL=0 alert the CPU */
+		reg_modify_bits(PPE1_GLO_CFG, DFL_TTL0_DRP, 4, 1);
+	} else {
+		/* PPE Engine Disable */
+		reg_modify_bits(PPE1_GLO_CFG, 0, 0, 1);
+		if (fe_feature & PPE_MIB)
+			reg_write(MIB_CFG_PPE1, 0x00);	/* Disable MIB */
+	}
+}
+
+
+#if (0)
+static void foe_free_tbl(uint32_t num_of_entry)
+{
+	u32 foe_tbl_size;
+
+	foe_tbl_size = num_of_entry * sizeof(struct foe_entry);
+	dma_free_coherent(NULL, foe_tbl_size, ppe_foe_base, ppe_phy_foe_base);
+	reg_write(PPE_FOE_BASE, 0);
+}
+#endif
+
+int32_t ppe_eng_start(void)
+{
+	/* Set PPE Flow Set */
+	ppe_setfoe_ebl(1);
+
+	/* Set Auto Age-Out Function */
+	ppe_setage_out();
+
+	/* Set PPE FOE KEEPALIVE TIMER */
+	ppe_setfoe_ka();
+
+	/* Set PPE FOE Bind Rate */
+	ppe_setfoe_bind_rate(DFL_FOE_BNDR);
+
+	/* Set PPE Global Configuration */
+	ppe_setfoe_glocfg_ebl(1);
+
+	/* Set Auto Age-Out Function */
+	ppe1_setage_out();
+
+	/* Set PPE FOE KEEPALIVE TIMER */
+	ppe1_setfoe_ka();
+
+	/* Set PPE FOE Bind Rate */
+	ppe1_setfoe_bind_rate(DFL_FOE_BNDR);
+
+	/* Set PPE Global Configuration */
+	ppe1_setfoe_glocfg_ebl(1);
+
+	/*PSE ring full drop enable*/
+	//reg_write(PSE_PPE0_DROP, 0x700);
+	//reg_write(PSE_PPE1_DROP, 0x700);
+
+	return 0;
+}
+
+#if (0)
+static int32_t ppe_eng_stop(void)
+{
+	/* Set PPE FOE ENABLE */
+	ppe_setfoe_glocfg_ebl(0);
+
+	/* Set PPE Flow Set */
+	ppe_setfoe_ebl(0);
+
+	/* Free FOE table */
+	foe_free_tbl(FOE_4TB_SIZ);
+
+	return 0;
+}
+#endif
+
+struct net_device *ra_dev_get_by_name(const char *name)
+{
+	return dev_get_by_name(&init_net, name);
+}
+
+void eth_register(void)
+{
+	struct net_device *dev;
+	int i;
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_LAN);
+	ppe_dev_reg_handler(dev);
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			pr_notice("%s: dst_port[%d] = %s\n", __func__, i, dev->name);
+			DP_GMAC1 = i;
+			dst_port_type[i] = 0; //set hw fast path
+			break;
+		}
+	}
+	if (fe_feature & GE2_SUPPORT) {
+		dev = ra_dev_get_by_name(DEV_NAME_HNAT_WAN);
+		ppe_dev_reg_handler(dev);
+		for (i = 0; i < MAX_IF_NUM; i++) {
+			if (dst_port[i] == dev) {
+				pr_notice("%s: dst_port[%d] = %s\n", __func__, i, dev->name);
+				DP_GMAC2 = i;
+				dst_port_type[i] = 0; //set hw fast path
+				break;
+			}
+		}
+	}
+}
+#if (0)
+void modem_if_register(void)
+{
+	struct net_device *dev;
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI0);
+	ppe_dev_reg_handler(dev);
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI1);
+	ppe_dev_reg_handler(dev);
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI2);
+	ppe_dev_reg_handler(dev);
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI3);
+	ppe_dev_reg_handler(dev);
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI4);
+	ppe_dev_reg_handler(dev);
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI5);
+	ppe_dev_reg_handler(dev);
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI6);
+	ppe_dev_reg_handler(dev);
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_CCCI7);
+	ppe_dev_reg_handler(dev);
+}
+#endif
+void rndis_if_register(void)
+{
+	struct net_device *dev;
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_RNDIS0);
+	ppe_dev_reg_handler(dev);
+}
+
+void wifi_if_register(void)
+{
+	struct net_device *dev;
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_RA0);
+	ppe_dev_reg_handler(dev);
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_RAI0);
+	ppe_dev_reg_handler(dev);
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_RAX0);
+	ppe_dev_reg_handler(dev);
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_APCLI0);
+	ppe_dev_reg_handler(dev);
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_APCLI1);
+	ppe_dev_reg_handler(dev);
+}
+
+void ext_if_regiser(void)
+{
+	struct net_device *dev;
+	int i;
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_EDMA0);
+	ppe_dev_reg_handler(dev);
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			pr_notice("%s: dst_port[%d] = %s\n", __func__, i, dev->name);
+			DP_EDMA0 = i;
+			dst_port_type[i] = 1; //set sw fast path
+			break;
+		}
+	}
+
+	dev = ra_dev_get_by_name(DEV_NAME_HNAT_EDMA1);
+	ppe_dev_reg_handler(dev);
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			pr_notice("%s: dst_port[%d] = %s\n", __func__, i, dev->name);
+			DP_EDMA1 = i;
+			dst_port_type[i] = 1; //set sw fast path
+			break;
+		}
+	}
+}
+
+void ppe_set_dst_port(uint32_t ebl)
+{
+	int j;
+
+	for (j = 0; j < MAX_IF_NUM; j++)
+		dst_port_type[j] = 0; //default hw fast path
+
+	if (ebl) {
+#ifndef CONFIG_RAETH_EDMA
+		/* HNAT + eth: the default case when EDMA is not involved */
+		eth_register();
+#endif
+		ext_if_regiser();
+		//modem_if_register();
+		wifi_if_register();
+		rndis_if_register();
+	} else {
+		/* disable */
+		if (dst_port[DP_GMAC1] != NULL)
+			dev_put(dst_port[DP_GMAC1]);
+
+		if (dst_port[DP_GMAC2] != NULL)
+			dev_put(dst_port[DP_GMAC2]);
+
+		for (j = 0; j < MAX_IF_NUM; j++) {
+			if (dst_port[j])
+				dst_port[j] = NULL;
+		}
+	}
+}
+
+uint32_t set_gdma_fwd(uint32_t ebl)
+{
+	u32 data = 0;
+
+	data = reg_read(FE_GDMA1_FWD_CFG);
+
+	if (ebl) {
+		data &= ~0x7777;
+		/* Uni-cast frames forward to PPE */
+		data |= GDM1_UFRC_P_PPE;
+		/* Broad-cast MAC address frames forward to PPE */
+		data |= GDM1_BFRC_P_PPE;
+		/* Multi-cast MAC address frames forward to PPE */
+		data |= GDM1_MFRC_P_PPE;
+		/* Other MAC address frames forward to PPE */
+		data |= GDM1_OFRC_P_PPE;
+
+	} else {
+		data &= ~0x7777;
+		/* Uni-cast frames forward to CPU */
+		data |= GDM1_UFRC_P_CPU;
+		/* Broad-cast MAC address frames forward to CPU */
+		data |= GDM1_BFRC_P_CPU;
+		/* Multi-cast MAC address frames forward to CPU */
+		data |= GDM1_MFRC_P_CPU;
+		/* Other MAC address frames forward to CPU */
+		data |= GDM1_OFRC_P_CPU;
+	}
+
+	/* reg_write(FE_GDMA1_FWD_CFG, data); */
+
+	if (fe_feature & GE2_SUPPORT) {
+		data = reg_read(FE_GDMA2_FWD_CFG);
+
+		if (ebl) {
+			data &= ~0x7777;
+			/* Uni-cast frames forward to PPE */
+			data |= GDM1_UFRC_P_PPE;
+			/* Broad-cast MAC address frames forward to PPE */
+			data |= GDM1_BFRC_P_PPE;
+			/* Multi-cast MAC address frames forward to PPE */
+			data |= GDM1_MFRC_P_PPE;
+			/* Other MAC address frames forward to PPE */
+			data |= GDM1_OFRC_P_PPE;
+
+		} else {
+			data &= ~0x7777;
+			/* Uni-cast frames forward to CPU */
+			data |= GDM1_UFRC_P_CPU;
+			/* Broad-cast MAC address frames forward to CPU */
+			data |= GDM1_BFRC_P_CPU;
+			/* Multi-cast MAC address frames forward to CPU */
+			data |= GDM1_MFRC_P_CPU;
+			/* Other MAC address frames forward to CPU */
+			data |= GDM1_OFRC_P_CPU;
+		}
+		/* reg_write(FE_GDMA2_FWD_CFG, data); */
+	}
+
+	return 0;
+}
+
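+/*
+ * Flush-then-enable sequence for the FOE caches: toggle the clear bit and
+ * set the enable bit on both the PPE0 and PPE1 cache control registers.
+ */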
+void ppe_set_cache_ebl(void)
+{
+	/* clear cache table before enabling cache */
+	reg_modify_bits(CAH_CTRL, 1, 9, 1);
+	reg_modify_bits(CAH_CTRL, 0, 9, 1);
+
+	/* Cache enable */
+	reg_modify_bits(CAH_CTRL, 1, 0, 1);
+	reg_modify_bits(CAH_CTRL_PPE1, 1, 9, 1);
+	reg_modify_bits(CAH_CTRL_PPE1, 0, 9, 1);
+	reg_modify_bits(CAH_CTRL_PPE1, 1, 0, 1);
+}
+
+void ppe_set_ip_prot(void)
+{
+	/* IP Protocol Field for IPv4 NAT or IPv6 3-tuple flow */
+	/* Don't forget to turn on related bits in PPE_IP_PROT_CHK register if you want to support
+	 * another IP protocol.
+	 */
+	/* FIXME: enable it to support IP fragments */
+	reg_write(PPE_IP_PROT_CHK, 0xFFFFFFFF);	/* IPV4_NXTH_CHK and IPV6_NXTH_CHK */
+	/* reg_modify_bits(PPE_IP_PROT_0, IPPROTO_GRE, 0, 8); */
+	/* reg_modify_bits(PPE_IP_PROT_0, IPPROTO_TCP, 8, 8); */
+	/* reg_modify_bits(PPE_IP_PROT_0, IPPROTO_UDP, 16, 8); */
+	/* reg_modify_bits(PPE_IP_PROT_0, IPPROTO_IPV6, 24, 8); */
+	reg_write(PPE1_IP_PROT_CHK, 0xFFFFFFFF);
+}
+
+int ppe_fill_table_med(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	/* get start addr for each layer */
+	if (ppe_parse_layer_med(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	/* Set Layer2 Info */
+	if (ppe_fill_L2_info(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	/* Set Layer3 Info */
+	if (ppe_fill_L3_info_med(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	/* Set Layer4 Info */
+	if (ppe_fill_L4_info(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	return 0;
+}
+
+int ppe_fill_table(struct sk_buff *skb, struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	/* get start addr for each layer */
+	if (ppe_parse_layer_info(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	/* Set Layer2 Info */
+	if (ppe_fill_L2_info(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	/* Set Layer3 Info */
+	if (ppe_fill_L3_info(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	/* Set Layer4 Info */
+	if (ppe_fill_L4_info(skb, entry, ppe_parse_result)) {
+		if (debug_level >= 6)
+			DD;
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+
+	return 0;
+}
+
+int check_entry_region(struct sk_buff *skb)
+{
+	u8 which_region;
+
+	which_region = tx_decide_which_region(skb);
+
+	/* if (debug_level >= 6) */
+	/*	pr_notice("which_region = %d\n", which_region); */
+
+	if (which_region == ALL_INFO_ERROR) {
+		if (pr_debug_ratelimited())
+			pr_notice("ppe_tx_handler : ALL_INFO_ERROR\n");
+		return 1;
+	}
+
+	if (FOE_ENTRY_NUM(skb) >= (FOE_4TB_SIZ - 1))
+		return 1;
+
+	return 0;
+}
+
+int check_magic_tag_valid(struct sk_buff *skb)
+{
+	return !is_magic_tag_protect_valid(skb);
+}
+
+int check_use_UDP_3T(struct sk_buff *skb, struct foe_entry *entry)
+{
+#ifdef CONFIG_SUPPORT_WLAN_OPTIMIZE
+	if (bridge_lan_subnet(skb)) {
+		if (!get_skb_interface(skb))
+			USE_3T_UDP_FRAG = 0;
+		else
+			USE_3T_UDP_FRAG = 1;
+		if (USE_3T_UDP_FRAG == 0)
+			return 1;
+	} else {
+		USE_3T_UDP_FRAG = 0;
+	}
+#else
+#if (0)
+	if (bridge_lan_subnet(skb))
+		USE_3T_UDP_FRAG = 1;
+	else
+		USE_3T_UDP_FRAG = 0;
+#endif
+#endif
+
+	return 0;
+}
+
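+/*
+ * Read (and thereby clear, since MIB_CFG enables read-clear) the per-entry
+ * MIB counters through the indirect MIB_SER interface; WDMA1/EDMA1 flows
+ * live on PPE1, everything else on PPE0.
+ */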
+void clear_mib_count(struct sk_buff *skb, int pse_port)
+{
+	int count = 100000;
+
+	if ((pse_port == WDMA1_PSE_PORT) || (pse_port == EDMA1_PSE_PORT)) {
+		reg_write(MIB_SER_CR_PPE1, FOE_ENTRY_NUM(skb) | (1 << 16));
+		do {
+			if (!((reg_read(MIB_SER_CR_PPE1) & 0x10000) >> 16))
+				break;
+				/* usleep_range(100, 110); */
+		} while (--count);
+		reg_read(MIB_SER_R0_PPE1);
+		reg_read(MIB_SER_R1_PPE1);
+		reg_read(MIB_SER_R1_PPE1);
+		reg_read(MIB_SER_R2_PPE1);
+	} else {
+		reg_write(MIB_SER_CR, FOE_ENTRY_NUM(skb) | (1 << 16));
+		do {
+			if (!((reg_read(MIB_SER_CR) & 0x10000) >> 16))
+				break;
+				/* usleep_range(100, 110); */
+		} while (--count);
+		reg_read(MIB_SER_R0);
+		reg_read(MIB_SER_R1);
+		reg_read(MIB_SER_R1);
+		reg_read(MIB_SER_R2);
+	}
+}
+
+int ppe_common_part_med(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result)
+{
+	int ret;
+
+	ret = 0;
+
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+		if (get_done_bit(skb, entry) != 0)
+			return 1;
+#endif
+
+	ret = check_use_UDP_3T(skb, entry);
+
+	/* DVT use: ignore the UDP 3-tuple check result for now */
+	ret = 0;
+#if (0)
+	if (ret)
+		return ret;
+#endif
+	/* prevent a LAN-WAN NAT link from entering bind state */
+	if (IS_IPV4_HNAT(entry)) {
+		if (debug_level >= 7)
+			pr_notice("%s, NAT entry is not allowed to use HWNAT\n", __func__);
+		return 1;
+	}
+
+	ret = ppe_fill_table_med(skb, entry, ppe_parse_result);
+
+	if (ret)
+		return ret;
+
+	if ((fe_feature & HNAT_QDMA) && (fe_feature & HNAT_MCAST)) {
+		if (ppe_parse_result->is_mcast) {
+			/* foe_mcast_entry_qid(ppe_parse_result->vlan1,
+			 *		      ppe_parse_result->dmac,
+			 *		      M2Q_table[skb->mark]);
+			 * foe_mcast_entry_qid(ppe_parse_result->vlan1,
+			 *		      ppe_parse_result->dmac, 0);
+			 */
+		}
+	}
+	if (fe_feature & PPE_MIB)
+		clear_mib_count(skb, gmac_no);
+
+	return 0;
+}
+
+int ppe_common_part(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result)
+{
+	int ret;
+
+	ret = 0;
+
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+		if (get_done_bit(skb, entry) != 0)
+			return 1;
+#endif
+
+	ret = check_use_UDP_3T(skb, entry);
+
+	/* DVT use: ignore the UDP 3-tuple check result for now */
+	ret = 0;
+#if (0)
+	if (ret)
+		return ret;
+#endif
+
+	ret = ppe_fill_table(skb, entry, ppe_parse_result);
+
+	if (ret)
+		return ret;
+
+	if ((fe_feature & HNAT_QDMA) && (fe_feature & HNAT_MCAST)) {
+		if (ppe_parse_result->is_mcast) {
+			/* foe_mcast_entry_qid(ppe_parse_result->vlan1,
+			 *		      ppe_parse_result->dmac,
+			 *		      M2Q_table[skb->mark]);
+			 * foe_mcast_entry_qid(ppe_parse_result->vlan1,
+			 *		      ppe_parse_result->dmac, 0);
+			 */
+		}
+	}
+	if (fe_feature & PPE_MIB)
+		clear_mib_count(skb, gmac_no);
+
+	return 0;
+}
+
+void set_entry_done(struct sk_buff *skb, struct foe_entry *entry)
+{
+	/* entry->ipv4_hnapt.udib1.ilgf = 1; */
+
+	ppe_set_entry_bind(skb, entry); /* Enter binding state */
+
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+	set_ppe_table_done(entry);
+	/* make sure the data is written to DRAM */
+	wmb();
+#endif
+}
+
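+/*
+ * Common Ethernet TX bind path: fill the FOE entry, set the QDMA queue and
+ * force-QoS bits, pick the destination GMAC, record the external interface
+ * index and finally bind the entry.
+ */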
+int ppe_common_eth(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result)
+{
+	int ret;
+
+	ret = ppe_common_part(skb, entry, gmac_no, ppe_parse_result);
+
+	if (ret)
+		return ret;
+
+	if (fe_feature & HNAT_QDMA) {
+		set_ppe_qid(skb, entry);
+		set_eth_fqos(skb, entry);
+	}
+
+	if (fe_feature & GE2_SUPPORT)
+		ret = set_eth_dp_gmac2(entry, gmac_no, ppe_parse_result);
+	else
+		ret = set_eth_dp_gmac1(entry, gmac_no, ppe_parse_result);
+
+	if (ret)
+		return ret;
+	/* for the force-to-CPU handler, record the interface index */
+	if (ppe_set_ext_if_num(skb, entry)) {
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+	set_entry_done(skb, entry);
+
+	return 0;
+}
+
+int ppe_common_wifi(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result)
+{
+	int ret;
+
+	ret = ppe_common_part(skb, entry, gmac_no, ppe_parse_result);
+
+	if (ret)
+		return ret;
+
+	/* Set force port info */
+	set_wifi_info(skb, entry, gmac_no, ppe_parse_result);
+
+	/* for the force-to-CPU handler, record the interface index */
+	if (ppe_set_ext_if_num(skb, entry)) {
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+	set_entry_done(skb, entry);
+
+	return 0;
+}
+
+int ppe_common_modem(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result)
+{
+	int ret;
+
+	ret = ppe_common_part_med(skb, entry, gmac_no, ppe_parse_result);
+
+	if (ret)
+		return ret;
+
+	/* Set force port info */
+	set_modem_info(skb, entry, gmac_no, ppe_parse_result);
+
+	if (ppe_set_ext_if_num(skb, entry)) {
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+	set_entry_done(skb, entry);
+
+	return 0;
+}
+
+int ppe_common_rndis(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result)
+{
+	int ret;
+
+	ret = ppe_common_part(skb, entry, gmac_no, ppe_parse_result);
+
+	if (ret)
+		return ret;
+
+	/* Set force port info */
+	set_rndis_info(skb, entry, gmac_no, ppe_parse_result);
+
+	/* for the force-to-CPU handler, record the interface index */
+	if (ppe_set_ext_if_num(skb, entry)) {
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+	set_entry_done(skb, entry);
+
+	return 0;
+}
+
+
+int ppe_common_ext(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result)
+{
+	int ret;
+
+	ret = ppe_common_part(skb, entry, gmac_no, ppe_parse_result);
+
+	if (ret)
+		return ret;
+
+	/* Set force port info */
+	set_fast_path_info_ext(skb, entry, gmac_no, ppe_parse_result);
+
+	/* for the force-to-CPU handler, record the interface index */
+	if (ppe_set_ext_if_num(skb, entry)) {
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 1;
+	}
+	set_entry_done(skb, entry);
+
+#ifdef CONFIG_RAETH_EDMA
+	foe_dump_pkt(skb, entry);
+#endif
+
+	return 0;
+}
+int set_pre_bind(struct sk_buff *skb, struct foe_entry *entry)
+{
+/*#ifdef PREBIND_TEST*/
+/*		if (jiffies % 2 == 0) {*/
+/*			pr_notice("drop prebind packet jiffies=%lu\n", jiffies);*/
+/*			memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);*/
+/*			return 0;*/
+/*		}*/
+/*#endif*/
+	if (entry->udib1.preb && entry->bfib1.state != BIND) {
+		entry->bfib1.state = BIND;
+		entry->udib1.preb = 0;
+		/* Dump Binding Entry */
+		if (debug_level >= 1) {
+			foe_dump_entry(FOE_ENTRY_NUM(skb), entry);
+		} else {
+			/* drop duplicate prebind notify packet */
+			memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+int tx_cpu_handler_rndis(struct sk_buff *skb, struct foe_entry *entry, int gmac_no)
+{
+	int ret;
+	struct pkt_parse_result ppe_parse_result;
+
+	if (debug_level >= 7) {
+		if (FOE_AI(skb) == dbg_cpu_reason)
+			foe_dump_pkt_tx(skb, entry);
+	}
+
+	if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+	    (FOE_ALG(skb) == 0)) {
+
+		ret = ppe_common_rndis(skb, entry, gmac_no, &ppe_parse_result);
+
+		if (ret)
+			return ret;
+	} else if ((FOE_AI(skb) == HIT_BIND_KEEPALIVE_MC_NEW_HDR) ||
+		   (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		/* this is a duplicate packet in keepalive new-header mode;
+		 * just drop it
+		 */
+		if (debug_level >= 3)
+			pr_notice("RNDIS Tx got HITBIND_KEEPALIVE_DUP_OLD packet (%s,%d)\n", skb->dev->name,
+				FOE_ENTRY_NUM(skb));
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 0;
+	} else if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+		  (FOE_ALG(skb) == 1)) {
+		if (debug_level >= 3)
+			NAT_PRINT("FOE_ALG=1 (Entry=%d)\n", FOE_ENTRY_NUM(skb));
+	}
+
+	return 1;
+}
+
+int tx_cpu_handler_wifi(struct sk_buff *skb, struct foe_entry *entry, int gmac_no)
+{
+	int ret;
+	struct pkt_parse_result ppe_parse_result;
+
+	if (debug_level >= 7) {
+		if (FOE_AI(skb) == dbg_cpu_reason)
+			foe_dump_pkt_tx(skb, entry);
+	}
+
+	if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+	    (FOE_ALG(skb) == 0)) {
+
+		ret = ppe_common_wifi(skb, entry, gmac_no, &ppe_parse_result);
+
+		if (ret)
+			return ret;
+	} else if ((FOE_AI(skb) == HIT_BIND_KEEPALIVE_MC_NEW_HDR) ||
+		   (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		/* this is a duplicate packet in keepalive new-header mode;
+		 * just drop it
+		 */
+		if (debug_level >= 3)
+			pr_notice("Wifi Tx got HITBIND_KEEPALIVE_DUP_OLD packet (%s,%d)\n", skb->dev->name,
+				FOE_ENTRY_NUM(skb));
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 0;
+	} else if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+		  (FOE_ALG(skb) == 1)) {
+		if (debug_level >= 3)
+			NAT_PRINT("FOE_ALG=1 (Entry=%d)\n", FOE_ENTRY_NUM(skb));
+	}
+
+	return 1;
+}
+
+int tx_cpu_handler_modem(struct sk_buff *skb, struct foe_entry *entry, int gmac_no)
+{
+	int ret;
+	struct pkt_parse_result ppe_parse_result;
+
+	if (debug_level >= 7) {
+		pr_notice("%s,  cpu_reason = %x, gmac_no = %x FOE_ALG(skb) = %x !!\n", __func__, FOE_AI(skb), gmac_no, FOE_ALG(skb));
+		if (FOE_AI(skb) == dbg_cpu_reason)
+			foe_dump_pkt_tx(skb, entry);
+	}
+
+	if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+	    (FOE_ALG(skb) == 0)) {
+
+		ret = ppe_common_modem(skb, entry, gmac_no, &ppe_parse_result);
+
+		if (ret)
+			return ret;
+	} else if ((FOE_AI(skb) == HIT_BIND_KEEPALIVE_MC_NEW_HDR) ||
+		   (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		/* this is a duplicate packet in keepalive new-header mode;
+		 * just drop it
+		 */
+		if (debug_level >= 3)
+			pr_notice("Modem Tx got HITBIND_KEEPALIVE_DUP_OLD packet (%s,%d)\n", skb->dev->name,
+				FOE_ENTRY_NUM(skb));
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 0;
+	} else if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+		  (FOE_ALG(skb) == 1)) {
+		if (debug_level >= 3) {
+			//FOE_INFO_DUMP(skb);
+			NAT_PRINT("tx_cpu_handler_modem : FOE_ALG=1 (Entry=%d)\n", FOE_ENTRY_NUM(skb));
+		}
+	}
+	return 1;
+}
+int tx_cpu_handler_eth(struct sk_buff *skb, struct foe_entry *entry, int gmac_no)
+{
+	int ret;
+	struct pkt_parse_result ppe_parse_result;
+
+	if (debug_level >= 10)
+		pr_notice("%s,  cpu_reason = %x, gmac_no = %x FOE_ALG(skb) = %x !!\n", __func__, FOE_AI(skb), gmac_no, FOE_ALG(skb));
+	if (debug_level >= 7) {
+		if (FOE_AI(skb) == dbg_cpu_reason)
+			foe_dump_pkt_tx(skb, entry);
+	}
+
+	if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+	    (FOE_ALG(skb) == 0)) {
+
+		ret = ppe_common_eth(skb, entry, gmac_no, &ppe_parse_result);
+
+		if (ret)
+			return ret;
+
+	} else if ((FOE_AI(skb) == HIT_BIND_KEEPALIVE_MC_NEW_HDR) ||
+		   (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		/* this is a duplicate packet in keepalive new-header mode;
+		 * just drop it
+		 */
+		if (debug_level >= 3)
+			pr_notice("ETH Tx got HITBIND_KEEPALIVE_DUP_OLD packet (%s,%d)\n", skb->dev->name,
+				FOE_ENTRY_NUM(skb));
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 0;
+	} else if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+		  (FOE_ALG(skb) == 1)) {
+		if (debug_level >= 3)
+			NAT_PRINT("FOE_ALG=1 (Entry=%d)\n", FOE_ENTRY_NUM(skb));
+	}
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	if (FOE_AI(skb) == HIT_PRE_BIND)
+		return set_pre_bind(skb, entry);
+#endif
+
+	return 1;
+}
+
+int tx_cpu_handler_ext(struct sk_buff *skb, struct foe_entry *entry, int gmac_no)
+{
+	int ret;
+	struct pkt_parse_result ppe_parse_result;
+
+	if (debug_level >= 7) {
+		pr_notice("[HS-ethernet/HWNAT/TX] %s enter!\n", __func__);
+		if (FOE_AI(skb) == dbg_cpu_reason)
+			foe_dump_pkt_tx(skb, entry);
+	}
+
+	if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+	    (FOE_ALG(skb) == 0)) {
+
+		ret = ppe_common_ext(skb, entry, gmac_no, &ppe_parse_result);
+
+		if (ret)
+			return ret;
+	} else if ((FOE_AI(skb) == HIT_BIND_KEEPALIVE_MC_NEW_HDR) ||
+		   (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		/* this is a duplicate packet in keepalive new-header mode;
+		 * just drop it
+		 */
+		if (debug_level >= 3)
+			pr_notice("ext Tx got HITBIND_KEEPALIVE_DUP_OLD packet (%s,%d)\n", skb->dev->name,
+				FOE_ENTRY_NUM(skb));
+		memset(FOE_INFO_START_ADDR(skb), 0, FOE_INFO_LEN);
+		return 0;
+	} else if ((FOE_AI(skb) == HIT_UNBIND_RATE_REACH) &&
+		  (FOE_ALG(skb) == 1)) {
+		if (debug_level >= 3)
+			NAT_PRINT("FOE_ALG=1 (Entry=%d)\n", FOE_ENTRY_NUM(skb));
+	}
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+	if (FOE_AI(skb) == HIT_PRE_BIND)
+		return set_pre_bind(skb, entry);
+#endif
+
+	return 1;
+}
+
+void rx_debug_log(struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+
+	entry = decide_which_ppe(skb);
+
+	if (debug_level >= 7) {
+		hnat_cpu_reason_cnt(skb);
+		if (FOE_AI(skb) == dbg_cpu_reason)
+			foe_dump_pkt(skb, entry);
+	}
+}
+
+int rx_cpu_handler_modem(struct sk_buff *skb)
+{
+	if (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR) {
+		if (debug_level >= 3)
+			pr_notice("MODEM Rx got HITBIND_KEEPALIVE_DUP_OLD packet (%d)\n",
+				FOE_ENTRY_NUM(skb));
+		return 1;
+	}
+
+	return 1;
+}
+int rx_cpu_handler_eth(struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+
+	entry = decide_which_ppe(skb);
+
+	if (FOE_AI(skb) == HIT_BIND_FORCE_TO_CPU) {
+		return hitbind_force_to_cpu_handler(skb, entry);
+	/* handle an incoming packet which came back from the PPE */
+	} else if ((is_if_pcie_wlan_rx(skb) && ((FOE_SP(skb) == 0) || (FOE_SP(skb) == 5) || (FOE_SP(skb) == 11) || (FOE_SP(skb) == 12))) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_UC_OLD_HDR) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_MC_NEW_HDR) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		return ppe_extif_pingpong_handler(skb);
+	} else if (FOE_AI(skb) == HIT_BIND_KEEPALIVE_UC_OLD_HDR) {
+		if (debug_level >= 3)
+			pr_notice("eth Got HIT_BIND_KEEPALIVE_UC_OLD_HDR packet (hash index=%d)\n",
+				FOE_ENTRY_NUM(skb));
+		return 1;
+	} else if (FOE_AI(skb) == HIT_BIND_MULTICAST_TO_CPU ||
+		   FOE_AI(skb) == HIT_BIND_MULTICAST_TO_GMAC_CPU) {
+		return hitbind_force_mcast_to_wifi_handler(skb);
+	} else if (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR) {
+		if (debug_level >= 3)
+			pr_notice("ETH Rx got HIT_BIND_KEEPALIVE_DUP_OLD_HDR packet (hash index=%d)\n",
+				FOE_ENTRY_NUM(skb));
+		keep_alive_old_pkt_handler(skb);
+		/* change to a multicast packet so the bridge does not learn it;
+		 * after kernel 2.6.36 a multicast source MAC is dropped by the
+		 * bridge, so we need to recover the correct interface
+		 */
+		/* eth->h_source[0] = 0x1; */
+
+		return 1;
+	}
+
+	return 1;
+}
+
+int rx_cpu_handler_wifi(struct sk_buff *skb)
+{
+	struct foe_entry *entry; // = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+	//int sw_fast_path;
+	/*struct ethhdr *eth = (struct ethhdr *)(skb->data - ETH_HLEN);*/
+
+	entry = decide_which_ppe(skb);
+
+	if (FOE_AI(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR) {
+		if (debug_level >= 3)
+			pr_notice("WIFI Rx got HITBIND_KEEPALIVE_DUP_OLD packet (%d)\n",
+				FOE_ENTRY_NUM(skb));
+	}
+
+	if ((FOE_MAGIC_TAG(skb) == FOE_MAGIC_WED0) ||
+	    (FOE_MAGIC_TAG(skb) == FOE_MAGIC_WED1))
+		return 1;
+
+	/* the incoming packet is from a PCI or WiFi interface */
+	/* if (is_if_pcie_wlan_rx(skb)) */
+	/*	return ppe_extif_rx_handler(skb); */
+	if ((FOE_MAGIC_TAG(skb) == FOE_MAGIC_PCI) ||
+	    (FOE_MAGIC_TAG(skb) == FOE_MAGIC_WLAN) ||
+	    (FOE_MAGIC_TAG(skb) == FOE_MAGIC_RNDIS)) {
+/*		if(fe_feature & HNAT_IPI)*/
+/*			return HnatIPIExtIfHandler(skb);*/
+		return ppe_extif_rx_handler(skb);
+	} else if (FOE_AI(skb) == HIT_BIND_FORCE_TO_CPU) {
+/*		if(fe_feature & HNAT_IPI)*/
+/*			return HnatIPIForceCPU(skb);*/
+
+		return hitbind_force_to_cpu_handler(skb, entry);
+
+		/* handle the incoming packet which came back from PPE */
+	} else if ((is_if_pcie_wlan_rx(skb) && ((FOE_SP(skb) == 0) || (FOE_SP(skb) == 5))) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_UC_OLD_HDR) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_MC_NEW_HDR) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		return ppe_extif_pingpong_handler(skb);
+	}
+	return 1;
+}
+
+int rx_cpu_handler_ext(struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+
+	if (FOE_AI(skb) == HIT_BIND_FORCE_TO_CPU) {
+
+		if (debug_level >= 7) {
+			pr_notice("%s, HIT_BIND_FORCE_TO_CPU, FOE_SP(skb):%d\n", __func__, FOE_SP(skb));
+		}
+		entry = decide_which_ppe(skb);
+
+		return hitbind_force_to_cpu_handler(skb, entry);
+
+		/* handle the incoming packet which came back from PPE */
+	} else if ((is_if_pcie_wlan_rx(skb) && ((FOE_SP(skb) == 11) || (FOE_SP(skb) == 12)))  &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_UC_OLD_HDR) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_MC_NEW_HDR) &&
+		   (FOE_AI(skb) != HIT_BIND_KEEPALIVE_DUP_OLD_HDR)) {
+		if (debug_level >= 7) {
+			pr_notice("%s, FOE_SP(skb):%d, FOE_AI(skb):0x%x, SP match to handler\n",
+				__func__, FOE_SP(skb), FOE_AI(skb));
+		}
+		return ppe_extif_pingpong_handler(skb);
+	}
+	return 1;
+}
+
+int rx_cpu_handler_rndis(struct sk_buff *skb)
+{
+	/* the incoming packet is from USB interface */
+	if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_RNDIS) {
+		return ppe_extif_rx_handler(skb);
+
+	}
+	return 1;
+}
+
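+/*
+ * Normalize the FOE metadata kept in the skb head/tail room: copy the head
+ * tags into the canonical fields for GE/EDMA RX packets, then derive the
+ * source PSE port from the magic tag for WED/MED/EDMA/RNDIS traffic.
+ */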
+void foe_format_create(struct sk_buff *skb)
+{
+	u32 alg_tmp, sp_tmp, entry_tmp, ai_tmp;
+
+	if (IS_SPACE_AVAILABLE_HEAD(skb)) {
+		FOE_TAG_PROTECT_HEAD(skb) = TAG_PROTECT;
+		if (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE || FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_EDMARX) {
+			alg_tmp = 0;
+			sp_tmp = FOE_SP_HEAD(skb);
+			entry_tmp = FOE_ENTRY_NUM_HEAD(skb);
+			ai_tmp = FOE_AI_HEAD(skb);
+			FOE_SP(skb) = sp_tmp & 0xf;
+			FOE_ENTRY_NUM(skb) = entry_tmp & 0x7fff;
+			FOE_AI(skb) = ai_tmp & 0x1f;
+			FOE_ALG(skb) = alg_tmp & 0x1;
+
+		}
+		if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_WED0)
+			FOE_SP(skb) = WDMA0_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_WED1)
+			FOE_SP(skb) = WDMA1_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_MED)
+			FOE_SP(skb) = MDMA_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA0)
+			FOE_SP(skb) = EDMA0_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA1)
+			FOE_SP(skb) = EDMA1_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_RNDIS)
+			FOE_SP(skb) = ADMA_PSE_PORT;
+	}
+	if (IS_SPACE_AVAILABLE_TAIL(skb)) {
+		FOE_ALG_TAIL(skb) = 0;
+		FOE_TAG_PROTECT_TAIL(skb) = TAG_PROTECT;
+		FOE_ENTRY_NUM_MSB_TAIL(skb) = FOE_ENTRY_NUM(skb) & 0x3fff;
+		FOE_ENTRY_NUM_LSB_TAIL(skb) = (FOE_ENTRY_NUM(skb) & 0x4000) >> 14;
+		FOE_AI_TAIL(skb) = FOE_AI(skb);
+		if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_WED0)
+			FOE_SP_TAIL(skb) = WDMA0_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_WED1)
+			FOE_SP_TAIL(skb) = WDMA1_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_MED)
+			FOE_SP_TAIL(skb) = MDMA_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA0)
+			FOE_SP_TAIL(skb) = EDMA0_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_EDMA1)
+			FOE_SP_TAIL(skb) = EDMA1_PSE_PORT;
+		else if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_RNDIS)
+			FOE_SP_TAIL(skb) = ADMA_PSE_PORT; /* assumed: tail variant, as in the branches above */
+	}
+}
+
+void ppe_eng_init(void)
+{
+	ppe_set_ip_prot();
+	ppe_set_cache_ebl();
+
+	/* Initialize PPE related register */
+	ppe_eng_start();
+}
+
+int check_whitelist(struct sk_buff *skb)
+{
+	int i, dev_match;
+
+	dev_match = 1;
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == skb->dev) {
+			dev_match = 0;
+			/* pr_notice("%s : Interface=%s, vir_if_idx=%x\n", __func__, skb->dev, vir_if_idx); */
+			break;
+		}
+	}
+
+#ifdef CONFIG_RAETH_EDMA
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		/* dev->name is an embedded array, so a NULL-name test can never
+		 * fire; assume the intent was to stop at an empty slot
+		 */
+		if (!dst_port[i]) {
+			dev_match = 1;
+			if (debug_level >= 7)
+				pr_err("[HS-ethernet/HWNAT/TX] %s : dst_port[%d] is NULL\n", __func__, i);
+			return dev_match;
+		}
+
+		if ((strcmp(dst_port[i]->name, DEV_NAME) == 0 && strcmp(skb->dev->name, AQR_DEV_NAME) == 0) ||
+		    (strcmp(dst_port[i]->name, DEV2_NAME) == 0 && strcmp(skb->dev->name, AQR_DEV2_NAME) == 0)) {
+			dev_match = 0;
+			if (debug_level >= 7)
+				pr_notice("[HS-ethernet/HWNAT/TX] %s : dst_port[%d] Interface = %s match\n", __func__, i, skb->dev->name);
+			break;
+		}
+	}
+#endif
+
+	/* assumed typo fix: dev_match is only ever 0 or 1 */
+	if (dev_match == 1)
+		pr_err("%s : dev not found\n", __func__);
+
+	return dev_match;
+}
+
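+/*
+ * Select the FOE table by source PSE port: WDMA1, MDMA and EDMA1 traffic is
+ * handled by PPE1, everything else (including unknown ports) by PPE0.
+ */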
+struct foe_entry  *decide_which_ppe(struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+
+	if ((FOE_SP(skb) == GDMA0_PSE_PORT) || (FOE_SP(skb) == GDMA1_PSE_PORT)) {
+		entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+	} else if (FOE_SP(skb) == WDMA0_PSE_PORT) {
+		entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+	} else if (FOE_SP(skb) == WDMA1_PSE_PORT) {
+		entry = &ppe1_foe_base[FOE_ENTRY_NUM(skb)];
+	} else if (FOE_SP(skb) == MDMA_PSE_PORT) {
+		entry = &ppe1_foe_base[FOE_ENTRY_NUM(skb)];
+	} else if (FOE_SP(skb) == EDMA0_PSE_PORT) {
+		entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+	} else if (FOE_SP(skb) == EDMA1_PSE_PORT) {
+		entry = &ppe1_foe_base[FOE_ENTRY_NUM(skb)];
+	} else if ((FOE_SP(skb) == ADMA_PSE_PORT) || (FOE_SP(skb) == QDMA_PSE_PORT)) {
+		entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+	} else {
+		entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+		if (debug_level >= 3) {
+			/* [MAGIC is PPE] extif -> eth_tx (pse port is uninitialized) -> eth rx -> pingpong */
+			/* [MAGIC is WLAN] rx wifi (pse port is uninitialized) */
+			if (FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE && FOE_MAGIC_TAG(skb) != FOE_MAGIC_WLAN) {
+				FOE_INFO_DUMP(skb);
+				pr_notice("%s, SP port error = %d, %s\n", __func__, FOE_SP(skb), skb->dev->name);
+			}
+		}
+	}
+	set_rxif_idx(entry, FOE_IF_IDX(skb));
+
+	return entry;
+}
+
+void hwnat_config_setting(void)
+{
+	hnat_chip_name |= MT7621_HWNAT;
+	hnat_chip_name |= MT7622_HWNAT;
+	hnat_chip_name |= MT7623_HWNAT;
+	hnat_chip_name |= LEOPARD_HWNAT;
+
+	pr_notice("hnat_chip_name = %x\n", hnat_chip_name);
+}
+
+void fe_feature_setting(void)
+{
+	int i;
+
+	fe_feature |= GE2_SUPPORT;
+	fe_feature |= HNAT_IPV6;
+	fe_feature |= HNAT_VLAN_TX;
+	fe_feature |= HNAT_MCAST;
+	fe_feature |= HNAT_QDMA;
+	fe_feature |= WARP_WHNAT;
+	fe_feature |= WIFI_HNAT;
+	fe_feature |= HNAT_WAN_P4;
+	fe_feature |= WAN_TO_WLAN_QOS;
+	fe_feature |= HNAT_SP_TAG;
+	fe_feature |= QDMA_TX_RX;
+	fe_feature |= PPE_MIB;
+	fe_feature |= PACKET_SAMPLING;
+	fe_feature |= HNAT_OPENWRT;
+	fe_feature |= HNAT_WLAN_QOS;
+	fe_feature |= WLAN_OPTIMIZE;
+	fe_feature |= UDP_FRAG;
+	fe_feature |= AUTO_MODE;
+	fe_feature |= SEMI_AUTO_MODE;
+	fe_feature |= MANUAL_MODE;
+	fe_feature |= PRE_BIND;
+	fe_feature |= ACCNT_MAINTAINER;
+	fe_feature |= HNAT_IPI;
+	fe_feature |= DBG_IPV6_SIP;
+	fe_feature |= DBG_IPV4_SIP;
+	fe_feature |= DBG_SP;
+	fe_feature |= ETH_QOS;
+	fe_feature |= SW_DVFS;
+
+	pr_notice("fe_feature = %x\n", fe_feature);
+	for (i = 0; i < ARRAY_SIZE(mtk_hnat_feature_name); i++) {
+		if (fe_feature & BIT(i))
+			pr_notice("!! hwnat feature :%s\n", mtk_hnat_feature_name[i]);
+	}
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_common.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_common.h
new file mode 100755
index 0000000..780b4ed
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_common.h
@@ -0,0 +1,133 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _RA_COMMON_WANTED
+#define _RA_COMMON_WANTED
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "foe_fdb.h"
+
+#ifndef NEXTHDR_IPIP
+#define NEXTHDR_IPIP 4
+#endif
+
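+/* Prototypes for the PPE/FOE helpers shared across the hw_nat sources. */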
+int ppe_setfoe_hash_mode(u32 hash_mode, struct device *dev);
+void ppe_set_dst_port(uint32_t ebl);
+void ppe_set_ip_prot(void);
+void foe_ac_update_ebl(int ebl);
+int32_t ppe_eng_start(void);
+void ppe_dev_reg_handler(struct net_device *dev);
+void ppe_dev_unreg_handler(struct net_device *dev);
+uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb);
+uint32_t foe_dump_pkt(struct sk_buff *skb, struct foe_entry *entry);
+int hitbind_force_to_cpu_handler(struct sk_buff *skb, struct foe_entry *entry);
+uint32_t ppe_extif_rx_handler(struct sk_buff *skb);
+uint32_t ppe_extif_pingpong_handler(struct sk_buff *skb);
+uint32_t keep_alive_handler(struct sk_buff *skb, struct foe_entry *entry);
+uint32_t keep_alive_old_pkt_handler(struct sk_buff *skb);
+int32_t ppe_parse_layer_info(struct sk_buff *skb, struct foe_entry *entry,
+			     struct pkt_parse_result *ppe_parse_result);
+int get_skb_interface(struct sk_buff *skb);
+uint16_t tx_decide_which_region(struct sk_buff *skb);
+int bridge_lan_subnet(struct sk_buff *skb);
+int get_done_bit(struct sk_buff *skb, struct foe_entry *entry);
+int hitbind_force_mcast_to_wifi_handler(struct sk_buff *skb);
+uint16_t is_if_pcie_wlan_rx(struct sk_buff *skb);
+uint16_t is_magic_tag_protect_valid(struct sk_buff *skb);
+int32_t setforce_port_qdmatx_qdmarx(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result);
+int32_t setforce_port_qdmatx_pdmarx(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result);
+int32_t setforce_port_pdmatx_pdmarx(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result);
+uint32_t ppe_set_ext_if_num(struct sk_buff *skb, struct foe_entry *entry);
+void set_ppe_table_done(struct foe_entry *entry);
+uint32_t set_gdma_fwd(uint32_t ebl);
+unsigned char *FOE_INFO_START_ADDR(struct sk_buff *skb);
+int ppe_fill_table(struct sk_buff *skb, struct foe_entry *entry,
+		   struct pkt_parse_result *ppe_parse_result);
+int set_force_port_info(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result);
+int check_use_UDP_3T(struct sk_buff *skb, struct foe_entry *entry);
+int check_entry_region(struct sk_buff *skb);
+void clear_mib_count(struct sk_buff *skb, int pse_port);
+int ppe_common_part(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result);
+int ppe_common_eth(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result);
+int ppe_common_ext(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+				    struct pkt_parse_result *ppe_parse_result);
+int check_magic_tag_valid(struct sk_buff *skb);
+int set_pre_bind(struct sk_buff *skb, struct foe_entry *entry);
+int tx_cpu_handler_wifi(struct sk_buff *skb, struct foe_entry *entry, int gmac_no);
+int tx_cpu_handler_eth(struct sk_buff *skb, struct foe_entry *entry, int gmac_no);
+int tx_cpu_handler_ext(struct sk_buff *skb, struct foe_entry *entry, int gmac_no);
+int tx_cpu_handler_modem(struct sk_buff *skb, struct foe_entry *entry, int gmac_no);
+int tx_cpu_handler_rndis(struct sk_buff *skb, struct foe_entry *entry, int gmac_no);
+void set_eth_fqos(struct sk_buff *skb, struct foe_entry *entry);
+int rx_cpu_handler_eth(struct sk_buff *skb);
+int rx_cpu_handler_wifi(struct sk_buff *skb);
+int rx_cpu_handler_modem(struct sk_buff *skb);
+int rx_cpu_handler_rndis(struct sk_buff *skb);
+void foe_format_create(struct sk_buff *skb);
+void rx_debug_log(struct sk_buff *skb);
+void ppe_eng_init(void);
+int check_whitelist(struct sk_buff *skb);
+int check_hnat_type(struct sk_buff *skb);
+void set_fast_path_info_ext(struct sk_buff *skb, struct foe_entry *entry, int gmac_no,
+		 struct pkt_parse_result *ppe_parse_result);
+struct foe_entry *decide_which_ppe(struct sk_buff *skb);
+int rx_cpu_handler_ext(struct sk_buff *skb);
+int ppe1_setfoe_hash_mode(u32 hash_mode, struct device *dev);
+void hwnat_config_setting(void);
+void fe_feature_setting(void);
+void FOE_INFO_DUMP(struct sk_buff *skb);
+#endif /* _RA_COMMON_WANTED */
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_config.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_config.h
new file mode 100755
index 0000000..222ccc3
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_config.h
@@ -0,0 +1,274 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _HNAT_CONFIG_WANTED
+#define _HNAT_CONFIG_WANTED
+
+//#include "raeth_config.h"
+
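+/*
+ * Each CONFIG_* switch below is mapped to a single flag so the driver can
+ * collect its compile-time configuration into bitmasks (hnat_chip_name and
+ * fe_feature) at runtime.
+ */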
+#define CONFIG_RAETH_GMAC2
+#define CONFIG_MACH_LEOPARD
+//#define CONFIG_RAETH_HW_VLAN_TX
+
+#if defined(CONFIG_MEDIATEK_HSETH)
+#define CONFIG_RAETH_QDMA
+#define CONFIG_RAETH_EDMA
+#else
+#define CONFIG_RAETH_QDMA
+#endif
+
+#if defined(CONFIG_ARCH_MT7622)
+#define USE_UDP_FRAG
+#endif
+
+#ifdef CONFIG_RALINK_MT7620
+#define MT7620_HWNAT	BIT(0)
+#else
+#define MT7620_HWNAT	(0)
+#endif
+
+#ifdef CONFIG_RALINK_MT7621
+#define MT7621_HWNAT	BIT(1)
+#else
+#define MT7621_HWNAT	(0)
+#endif
+#ifdef CONFIG_ARCH_MT7622
+#define MT7622_HWNAT	BIT(2)
+#else
+#define MT7622_HWNAT	(0)
+#endif
+#ifdef CONFIG_ARCH_MT7623
+#define MT7623_HWNAT	BIT(3)
+#else
+#define MT7623_HWNAT	(0)
+#endif
+#ifdef CONFIG_MACH_LEOPARD
+#define LEOPARD_HWNAT	BIT(4)
+#else
+#define LEOPARD_HWNAT	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_GMAC2
+#define GE2_SUPPORT	(1)
+#else
+#define GE2_SUPPORT	(0)
+#endif
+
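+/* The HNAT_* flags below are OR'ed into fe_feature (see fe_feature_setting()). */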
+#ifdef	CONFIG_RA_HW_NAT_IPV6
+#define HNAT_IPV6	BIT(1)
+#else
+#define HNAT_IPV6	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+#define HNAT_VLAN_TX	BIT(2)
+#else
+#define HNAT_VLAN_TX	(0)
+#endif
+
+#ifdef	CONFIG_PPE_MCAST
+#define HNAT_MCAST	BIT(3)
+#else
+#define HNAT_MCAST	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_QDMA
+#define HNAT_QDMA	BIT(4)
+#else
+#define HNAT_QDMA	(0)
+#endif
+
+#ifdef	CONFIG_ARCH_MT7622_WIFI_HW_NAT
+#define WARP_WHNAT	BIT(5)
+#else
+#define WARP_WHNAT	(0)
+#endif
+
+#if defined(CONFIG_RA_HW_NAT_WIFI) || defined(CONFIG_RA_HW_NAT_WIFI_NEW_ARCH)
+#define WIFI_HNAT	BIT(6)
+#else
+#define WIFI_HNAT	(0)
+#endif
+
+#ifdef	CONFIG_WAN_AT_P4
+#define HNAT_WAN_P4	BIT(7)
+#else
+#define HNAT_WAN_P4	(0)
+#endif
+
+#ifdef	CONFIG_WAN_TO_WLAN_SUPPORT_QOS
+#define WAN_TO_WLAN_QOS	BIT(8)
+#else
+#define WAN_TO_WLAN_QOS		(0)
+#endif
+
+#ifdef	CONFIG_RAETH_SPECIAL_TAG
+#define HNAT_SP_TAG	BIT(9)
+#else
+#define HNAT_SP_TAG		(0)
+#endif
+
+#ifdef CONFIG_RAETH_QDMATX_QDMARX
+#define QDMA_TX_RX	BIT(10)
+#else
+#define QDMA_TX_RX		(0)
+#endif
+
+#ifdef CONFIG_PPE_MIB
+#define PPE_MIB	BIT(11)
+#else
+#define PPE_MIB		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_PACKET_SAMPLING
+#define PACKET_SAMPLING	BIT(12)
+#else
+#define PACKET_SAMPLING		(0)
+#endif
+
+#ifdef CONFIG_SUPPORT_OPENWRT
+#define HNAT_OPENWRT	BIT(13)
+#else
+#define HNAT_OPENWRT		(0)
+#endif
+
+#ifdef CONFIG_SUPPORT_WLAN_QOS
+#define HNAT_WLAN_QOS	BIT(14)
+#else
+#define HNAT_WLAN_QOS		(0)
+#endif
+
+#ifdef CONFIG_SUPPORT_WLAN_OPTIMIZE
+#define WLAN_OPTIMIZE	BIT(15)
+#else
+#define WLAN_OPTIMIZE		(0)
+#endif
+
+#ifdef USE_UDP_FRAG
+#define UDP_FRAG	BIT(16)
+#else
+#define UDP_FRAG		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_AUTO_MODE
+#define AUTO_MODE	BIT(17)
+#else
+#define AUTO_MODE		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+#define SEMI_AUTO_MODE	BIT(18)
+#else
+#define SEMI_AUTO_MODE		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_MANUAL_MODE
+#define MANUAL_MODE	BIT(19)
+#else
+#define MANUAL_MODE		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+#define PRE_BIND	BIT(20)
+#else
+#define PRE_BIND		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_ACCNT_MAINTAINER
+#define ACCNT_MAINTAINER	BIT(21)
+#else
+#define ACCNT_MAINTAINER		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_IPI
+/* BIT(21) is already taken by ACCNT_MAINTAINER; HNAT_IPI needs its own bit. */
+#define HNAT_IPI	BIT(27)
+#else
+#define HNAT_IPI		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_HASH_DBG_IPV6_SIP
+#define DBG_IPV6_SIP	BIT(22)
+#else
+#define DBG_IPV6_SIP		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_HASH_DBG_IPV4_SIP
+#define DBG_IPV4_SIP	BIT(23)
+#else
+#define DBG_IPV4_SIP		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_HASH_DBG_SPORT
+#define DBG_SP	BIT(24)
+#else
+#define DBG_SP		(0)
+#endif
+
+#ifdef CONFIG_QDMA_SUPPORT_QOS
+#define ETH_QOS	BIT(25)
+#else
+#define ETH_QOS		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_SW_DVFS
+#define SW_DVFS	BIT(26)
+#else
+#define SW_DVFS		(0)
+#endif
+
+#endif /* _HNAT_CONFIG_WANTED */
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_api.c b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_api.c
new file mode 100755
index 0000000..17ecc96
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_api.c
@@ -0,0 +1,228 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "ra_nat.h"
+
+#include "frame_engine.h"
+#include "foe_fdb.h"
+#include "hwnat_ioctl.h"
+#include "util.h"
+#include "api.h"
+#include "hwnat_config.h"
+#include "hwnat_define.h"
+#include "hnat_dbg_proc.h"
+
+#define DD \
+do { \
+	pr_info("%s %d\n", __func__, __LINE__); \
+} while (0)
+
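+/* Dump one FOE table entry by hash index; the output format follows the
+ * entry type (IPv4 NAT/NAPT, DS-Lite, IPv6 1T/3T/5T routes, 6RD).
+ */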
+void dbg_dump_entry(uint32_t index)
+{
+	struct foe_entry *entry = &ppe_foe_base[index];
+
+	if (IS_IPV4_HNAPT(entry)) {
+		NAT_PRINT
+		    ("NAPT(%d): %u.%u.%u.%u:%d->%u.%u.%u.%u:%d ->", index,
+		     IP_FORMAT3(entry->ipv4_hnapt.sip),
+		     IP_FORMAT2(entry->ipv4_hnapt.sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.sip),
+		     IP_FORMAT0(entry->ipv4_hnapt.sip),
+		     entry->ipv4_hnapt.sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.dip), IP_FORMAT2(entry->ipv4_hnapt.dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip),
+		     entry->ipv4_hnapt.dport);
+		NAT_PRINT
+		    (" %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT0(entry->ipv4_hnapt.new_sip), entry->ipv4_hnapt.new_sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT0(entry->ipv4_hnapt.new_dip), entry->ipv4_hnapt.new_dport);
+	} else if (IS_IPV4_HNAT(entry)) {
+		NAT_PRINT("NAT(%d): %u.%u.%u.%u->%u.%u.%u.%u ->", index,
+			  IP_FORMAT3(entry->ipv4_hnapt.sip),
+			  IP_FORMAT2(entry->ipv4_hnapt.sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.sip),
+			  IP_FORMAT0(entry->ipv4_hnapt.sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.dip),
+			  IP_FORMAT2(entry->ipv4_hnapt.dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip));
+		NAT_PRINT(" %u.%u.%u.%u->%u.%u.%u.%u\n",
+			  IP_FORMAT3(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT0(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT0(entry->ipv4_hnapt.new_dip));
+	}
+
+	if (IS_IPV6_1T_ROUTE(entry)) {
+		NAT_PRINT("IPv6_1T(%d): %08X:%08X:%08X:%08X\n", index,
+			  entry->ipv6_1t_route.ipv6_dip3,
+			  entry->ipv6_1t_route.ipv6_dip2,
+			  entry->ipv6_1t_route.ipv6_dip1, entry->ipv6_1t_route.ipv6_dip0);
+	} else if (IS_IPV4_DSLITE(entry)) {
+		NAT_PRINT
+		    ("IPv4 Ds-Lite(%d): %u.%u.%u.%u.%d->%u.%u.%u.%u:%d ->", index,
+		     IP_FORMAT3(entry->ipv4_dslite.sip),
+		     IP_FORMAT2(entry->ipv4_dslite.sip),
+		     IP_FORMAT1(entry->ipv4_dslite.sip),
+		     IP_FORMAT0(entry->ipv4_dslite.sip), entry->ipv4_dslite.sport,
+		     IP_FORMAT3(entry->ipv4_dslite.dip),
+		     IP_FORMAT2(entry->ipv4_dslite.dip),
+		     IP_FORMAT1(entry->ipv4_dslite.dip),
+		     IP_FORMAT0(entry->ipv4_dslite.dip), entry->ipv4_dslite.dport);
+		NAT_PRINT(" %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			  entry->ipv4_dslite.tunnel_sipv6_0,
+			  entry->ipv4_dslite.tunnel_sipv6_1,
+			  entry->ipv4_dslite.tunnel_sipv6_2,
+			  entry->ipv4_dslite.tunnel_sipv6_3,
+			  entry->ipv4_dslite.tunnel_dipv6_0,
+			  entry->ipv4_dslite.tunnel_dipv6_1,
+			  entry->ipv4_dslite.tunnel_dipv6_2, entry->ipv4_dslite.tunnel_dipv6_3);
+	} else if (IS_IPV6_3T_ROUTE(entry)) {
+		NAT_PRINT
+		    ("IPv6_3T(%d): %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n",
+		     index,
+		     entry->ipv6_3t_route.ipv6_sip0,
+		     entry->ipv6_3t_route.ipv6_sip1,
+		     entry->ipv6_3t_route.ipv6_sip2,
+		     entry->ipv6_3t_route.ipv6_sip3,
+		     entry->ipv6_3t_route.ipv6_dip0,
+		     entry->ipv6_3t_route.ipv6_dip1,
+		     entry->ipv6_3t_route.ipv6_dip2,
+		     entry->ipv6_3t_route.ipv6_dip3, entry->ipv6_3t_route.prot);
+	} else if (IS_IPV6_5T_ROUTE(entry)) {
+		if (IS_IPV6_FLAB_EBL()) {
+			NAT_PRINT
+			    ("IPv6_5T(%d): %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X",
+			     index,
+			     entry->ipv6_5t_route.ipv6_sip0,
+			     entry->ipv6_5t_route.ipv6_sip1,
+			     entry->ipv6_5t_route.ipv6_sip2,
+			     entry->ipv6_5t_route.ipv6_sip3,
+			     entry->ipv6_5t_route.ipv6_dip0,
+			     entry->ipv6_5t_route.ipv6_dip1,
+			     entry->ipv6_5t_route.ipv6_dip2, entry->ipv6_5t_route.ipv6_dip3);
+			NAT_PRINT("(Flow Label=%08X)\n",
+				  ((entry->ipv6_5t_route.
+				    sport << 16) | (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+		} else {
+			NAT_PRINT
+			    ("IPv6_5T(%d): %08X:%08X:%08X:%08X:%d-> ",
+			     index,
+			     entry->ipv6_5t_route.ipv6_sip0,
+			     entry->ipv6_5t_route.ipv6_sip1,
+			     entry->ipv6_5t_route.ipv6_sip2,
+			     entry->ipv6_5t_route.ipv6_sip3, entry->ipv6_5t_route.sport);
+			NAT_PRINT("%08X:%08X:%08X:%08X:%d\n",
+				  entry->ipv6_5t_route.ipv6_dip0,
+				  entry->ipv6_5t_route.ipv6_dip1,
+				  entry->ipv6_5t_route.ipv6_dip2,
+				  entry->ipv6_5t_route.ipv6_dip3, entry->ipv6_5t_route.dport);
+		}
+	} else if (IS_IPV6_6RD(entry)) {
+		if (IS_IPV6_FLAB_EBL()) {
+			NAT_PRINT
+			    ("IPv6_6RD(%d): %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X",
+			     index,
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+			     entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3);
+			NAT_PRINT("(Flow Label=%08X)\n",
+				  ((entry->ipv6_5t_route.
+				    sport << 16) | (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+		} else {
+			NAT_PRINT
+			    ("IPv6_6RD(%d): %08X:%08X:%08X:%08X:%d-> ",
+			     index,
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.sport);
+			NAT_PRINT(" %08X:%08X:%08X:%08X:%d\n", entry->ipv6_6rd.ipv6_dip0,
+				  entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+				  entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+		}
+	}
+}
+
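+/* Dump the PPE control registers; the CR base differs between
+ * MT7622/Leopard and other chips.
+ */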
+void dbg_dump_cr(void)
+{
+	unsigned int cr_base;
+	int i;
+	int cr_max;
+
+	if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT))
+		cr_base = 0x1B100C00;
+	else
+		cr_base = 0x1B100E00;
+	cr_max = 319 * 4;
+	for (i = 0; i < cr_max; i = i + 0x10) {
+		pr_info("0x%08x : 0x%08x 0x%08x 0x%08x 0x%08x\n", cr_base + i,
+			reg_read(PPE_MCAST_L_10 + i), reg_read(PPE_MCAST_L_10 + i + 4),
+			reg_read(PPE_MCAST_L_10 + i + 8), reg_read(PPE_MCAST_L_10 + i + 0xc));
+	}
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_proc.c b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_proc.c
new file mode 100755
index 0000000..d9fe34b
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_proc.c
@@ -0,0 +1,1686 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/seq_file.h>
+#include "ra_nat.h"
+#include "frame_engine.h"
+#include "foe_fdb.h"
+#include "hnat_ioctl.h"
+#include "util.h"
+#include "hnat_config.h"
+#include "hnat_define.h"
+#include "hnat_dbg_proc.h"
+#include "mcast_tbl.h"
+#include "hnat_common.h"
+
+struct proc_dir_entry *hnat_proc_reg_dir;
+static struct proc_dir_entry *proc_cpu_reason;
+static struct proc_dir_entry *proc_hnat_entry;
+static struct proc_dir_entry *proc_hnat_setting;
+static struct proc_dir_entry *proc_hnat_multicast;
+static struct proc_dir_entry *proc_hnat_whitelist;
+static struct proc_dir_entry *proc_hnat_type;
+static struct proc_dir_entry *proc_hnat_qos;
+static struct proc_dir_entry *proc_hnat_mib;
+static struct proc_dir_entry *proc_hnat_med;
+static struct proc_dir_entry *proc_hnat_mdma;
+static struct proc_dir_entry *proc_hnat_disabling_hwnat;
+
+int dbg_cpu_reason;
+EXPORT_SYMBOL(dbg_cpu_reason);
+
+struct hwnat_interface hnat_if[MAX_IF_NUM];
+EXPORT_SYMBOL(hnat_if);
+
+int disabling_hwnat;
+EXPORT_SYMBOL(disabling_hwnat);
+
+bool ppe_mib_counter_en;
+
+int dbg_entry_state = BIND;
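+
+/* Each /proc write handler below parses "<cmd> [args...]" from user space
+ * and dispatches through one of these per-node function tables; command 0
+ * always prints usage.
+ */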
+typedef int (*CPU_REASON_SET_FUNC) (int par1, int par2, int par3);
+typedef int (*ENTRY_SET_FUNC) (int par1, int par2, int par3);
+typedef int (*CR_SET_FUNC) (int par1, int par2, int par3);
+typedef int (*MULTICAST_SET_FUNC) (int par1, int par2, int par3);
+typedef int (*WHITELIST_SET_FUNC) (int par1, char *par2, int par3);
+typedef int (*TYPE_SET_FUNC) (int par1, char *par2, int par3);
+typedef int (*QOS_SET_FUNC) (int par1, int par2, int par3);
+typedef int (*MIB_SET_FUNC) (int par1, char *par2);
+
+void dbg_dump_entry(uint32_t index, struct foe_entry *entry)
+{
+	if (IS_IPV4_HNAPT(entry)) {
+		NAT_PRINT
+		    ("NAPT(%d): %u.%u.%u.%u:%d->%u.%u.%u.%u:%d ->", index,
+		     IP_FORMAT3(entry->ipv4_hnapt.sip),
+		     IP_FORMAT2(entry->ipv4_hnapt.sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.sip),
+		     IP_FORMAT0(entry->ipv4_hnapt.sip),
+		     entry->ipv4_hnapt.sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.dip), IP_FORMAT2(entry->ipv4_hnapt.dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip),
+		     entry->ipv4_hnapt.dport);
+		NAT_PRINT
+		    (" %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
+		     IP_FORMAT3(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_sip),
+		     IP_FORMAT0(entry->ipv4_hnapt.new_sip), entry->ipv4_hnapt.new_sport,
+		     IP_FORMAT3(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT1(entry->ipv4_hnapt.new_dip),
+		     IP_FORMAT0(entry->ipv4_hnapt.new_dip), entry->ipv4_hnapt.new_dport);
+	} else if (IS_IPV4_HNAT(entry)) {
+		NAT_PRINT("NAT(%d): %u.%u.%u.%u->%u.%u.%u.%u ->", index,
+			  IP_FORMAT3(entry->ipv4_hnapt.sip),
+			  IP_FORMAT2(entry->ipv4_hnapt.sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.sip),
+			  IP_FORMAT0(entry->ipv4_hnapt.sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.dip),
+			  IP_FORMAT2(entry->ipv4_hnapt.dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.dip), IP_FORMAT0(entry->ipv4_hnapt.dip));
+		NAT_PRINT(" %u.%u.%u.%u->%u.%u.%u.%u\n",
+			  IP_FORMAT3(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT2(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT0(entry->ipv4_hnapt.new_sip),
+			  IP_FORMAT3(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT2(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT1(entry->ipv4_hnapt.new_dip),
+			  IP_FORMAT0(entry->ipv4_hnapt.new_dip));
+	}
+
+	if (IS_IPV6_1T_ROUTE(entry)) {
+		NAT_PRINT("IPv6_1T(%d): %08X:%08X:%08X:%08X\n", index,
+			  entry->ipv6_1t_route.ipv6_dip3,
+			  entry->ipv6_1t_route.ipv6_dip2,
+			  entry->ipv6_1t_route.ipv6_dip1, entry->ipv6_1t_route.ipv6_dip0);
+	} else if (IS_IPV4_DSLITE(entry)) {
+		NAT_PRINT
+		    ("IPv4 Ds-Lite(%d): %u.%u.%u.%u.%d->%u.%u.%u.%u:%d ->", index,
+		     IP_FORMAT3(entry->ipv4_dslite.sip),
+		     IP_FORMAT2(entry->ipv4_dslite.sip),
+		     IP_FORMAT1(entry->ipv4_dslite.sip),
+		     IP_FORMAT0(entry->ipv4_dslite.sip), entry->ipv4_dslite.sport,
+		     IP_FORMAT3(entry->ipv4_dslite.dip),
+		     IP_FORMAT2(entry->ipv4_dslite.dip),
+		     IP_FORMAT1(entry->ipv4_dslite.dip),
+		     IP_FORMAT0(entry->ipv4_dslite.dip), entry->ipv4_dslite.dport);
+		NAT_PRINT(" %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
+			  entry->ipv4_dslite.tunnel_sipv6_0,
+			  entry->ipv4_dslite.tunnel_sipv6_1,
+			  entry->ipv4_dslite.tunnel_sipv6_2,
+			  entry->ipv4_dslite.tunnel_sipv6_3,
+			  entry->ipv4_dslite.tunnel_dipv6_0,
+			  entry->ipv4_dslite.tunnel_dipv6_1,
+			  entry->ipv4_dslite.tunnel_dipv6_2, entry->ipv4_dslite.tunnel_dipv6_3);
+	} else if (IS_IPV6_3T_ROUTE(entry)) {
+		NAT_PRINT
+		    ("IPv6_3T(%d): %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n",
+		     index,
+		     entry->ipv6_3t_route.ipv6_sip0,
+		     entry->ipv6_3t_route.ipv6_sip1,
+		     entry->ipv6_3t_route.ipv6_sip2,
+		     entry->ipv6_3t_route.ipv6_sip3,
+		     entry->ipv6_3t_route.ipv6_dip0,
+		     entry->ipv6_3t_route.ipv6_dip1,
+		     entry->ipv6_3t_route.ipv6_dip2,
+		     entry->ipv6_3t_route.ipv6_dip3, entry->ipv6_3t_route.prot);
+	} else if (IS_IPV6_5T_ROUTE(entry)) {
+		if (IS_IPV6_FLAB_EBL()) {
+			NAT_PRINT
+			    ("IPv6_5T(%d): %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X",
+			     index,
+			     entry->ipv6_5t_route.ipv6_sip0,
+			     entry->ipv6_5t_route.ipv6_sip1,
+			     entry->ipv6_5t_route.ipv6_sip2,
+			     entry->ipv6_5t_route.ipv6_sip3,
+			     entry->ipv6_5t_route.ipv6_dip0,
+			     entry->ipv6_5t_route.ipv6_dip1,
+			     entry->ipv6_5t_route.ipv6_dip2, entry->ipv6_5t_route.ipv6_dip3);
+			NAT_PRINT("(Flow Label=%08X)\n",
+				  ((entry->ipv6_5t_route.
+				    sport << 16) | (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+		} else {
+			NAT_PRINT
+			    ("IPv6_5T(%d): %08X:%08X:%08X:%08X:%d-> ",
+			     index,
+			     entry->ipv6_5t_route.ipv6_sip0,
+			     entry->ipv6_5t_route.ipv6_sip1,
+			     entry->ipv6_5t_route.ipv6_sip2,
+			     entry->ipv6_5t_route.ipv6_sip3, entry->ipv6_5t_route.sport);
+			NAT_PRINT("%08X:%08X:%08X:%08X:%d\n",
+				  entry->ipv6_5t_route.ipv6_dip0,
+				  entry->ipv6_5t_route.ipv6_dip1,
+				  entry->ipv6_5t_route.ipv6_dip2,
+				  entry->ipv6_5t_route.ipv6_dip3, entry->ipv6_5t_route.dport);
+		}
+	} else if (IS_IPV6_6RD(entry)) {
+		if (IS_IPV6_FLAB_EBL()) {
+			NAT_PRINT
+			    ("IPv6_6RD(%d): %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X",
+			     index,
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1,
+			     entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3);
+			NAT_PRINT("(Flow Label=%08X)\n",
+				  ((entry->ipv6_5t_route.
+				    sport << 16) | (entry->ipv6_5t_route.dport)) & 0xFFFFF);
+		} else {
+			NAT_PRINT
+			    ("IPv6_6RD(%d): %08X:%08X:%08X:%08X:%d-> ",
+			     index,
+			     entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
+			     entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3,
+			     entry->ipv6_6rd.sport);
+			NAT_PRINT(" %08X:%08X:%08X:%08X:%d\n", entry->ipv6_6rd.ipv6_dip0,
+				  entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2,
+				  entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport);
+		}
+	}
+}
+
+void dbg_dump_cr(void)
+{
+	unsigned int cr_base;
+	int i;
+	int cr_max;
+
+	if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT))
+		cr_base = 0x1B100C00;
+	else
+		cr_base = 0x1B100E00;
+	cr_max = 319 * 4;
+	for (i = 0; i < cr_max; i = i + 0x10) {
+		pr_notice("0x%08x : 0x%08x 0x%08x 0x%08x 0x%08x\n", cr_base + i,
+			reg_read(PPE_MCAST_L_10 + i), reg_read(PPE_MCAST_L_10 + i + 4),
+			reg_read(PPE_MCAST_L_10 + i + 8), reg_read(PPE_MCAST_L_10 + i + 0xc));
+	}
+}
+
+int hnat_set_usage(int level, int ignore2, int ignore3)
+{
+	pr_notice("Choose CPU reason 'x'");
+	pr_notice(" we can see which entry indx has cpu reason 'x'\n");
+	pr_notice("echo \"1 [cpu_reason]\" > /proc/%s\n",
+		PROCREG_CPU_REASON);
+
+	pr_notice("(2)IPv4(IPv6) TTL(hop limit) = 0\n");
+	pr_notice("(3)IPv4(IPv6) has option(extension) header\n");
+	pr_notice("(7)No flow is assigned\n");
+	pr_notice("(8)IPv4 HNAT doesn't support IPv4 /w fragment\n");
+	pr_notice("(9)IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n");
+	pr_notice("(10)IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n");
+	pr_notice("(11)IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n");
+	pr_notice("(12) Ingress packet is TCP fin/syn/rst\n");
+	pr_notice("(13) FOE Un-hit\n");
+	pr_notice("(14) FOE Hit unbind\n");
+	pr_notice("(15) FOE Hit unbind & rate reach\n");
+	pr_notice("(16) Hit bind PPE TCP FIN entry\n");
+	pr_notice("(17) Hit bind PPE entry and TTL(hop limit) = 1\n");
+	pr_notice("(18) Hit bind and VLAN replacement violation\n");
+	pr_notice("(19) Hit bind and keep alive with unicast old-header packet\n");
+	pr_notice("(20) Hit bind and keep alive with multicast new-header packet\n");
+	pr_notice("(21) Hit bind and keep alive with duplicate old-header packet\n");
+	pr_notice("(22) FOE Hit bind & force to CPU\n");
+	/* Hit bind and remove tunnel IP header, */
+	/* but inner IP has option/next header */
+	pr_notice("(23) HIT_BIND_WITH_OPTION_HEADER\n");
+	pr_notice("(28) Hit bind and exceed MTU\n");
+	pr_notice("(27) HIT_BIND_PACKET_SAMPLING\n");
+	pr_notice("(24) Switch clone multicast packet to CPU\n");
+	pr_notice("(25) Switch clone multicast packet to GMAC1 & CPU\n");
+	pr_notice("(26) HIT_PRE_BIND\n");
+	debug_level = level;
+	pr_notice("debug_level = %d\n", debug_level);
+	return 0;
+}
+
+int entry_set_usage(int level, int ignore2, int ignore3)
+{
+	pr_notice("<Usage> Get all bind entry  : cat /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_ENTRY);
+
+	pr_notice("<Usage> set entry state  : echo 1 [STATE] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_ENTRY);
+	pr_notice("<Usage> get entry detail  : echo 2 [index] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_ENTRY);
+	pr_notice("<Usage> delete entry  : echo 3 [index] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_ENTRY);
+	pr_notice("STATE: NVALID = 0, UNBIND = 1, BIND = 2, FIN = 3\n");
+	pr_notice("<Usage> set debug level : echo 0 [level] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_ENTRY);
+	debug_level = level;
+	pr_notice("debug_level = %d\n", debug_level);
+
+	return 0;
+}
+
+int cr_set_usage(int level, int ignore2, int ignore3)
+{
+	pr_notice("<Usage> get hnat CR  : cat /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+
+	pr_notice("<Usage> set debug level : echo 0 [level] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+	pr_notice("<Usage> set binding threshold : echo 1 [threshold] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+	pr_notice("<Usage> set bind lifetime");
+	pr_notice(" :echo 2 [tcp_life] [udp_life] [fin_life] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+	pr_notice("<Usage> set keep alive interval");
+	pr_notice(": echo 3 [tcp_interval] [udp_interval]  >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+
+	pr_notice("<Usage> enable 464XLAT : echo 4 [1:enable,0:disable] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+
+	debug_level = level;
+	pr_notice("debug_level = %d\n", debug_level);
+
+	return 0;
+}
+
+int multicast_set_usage(int level, int ignore2, int ignore3)
+{
+	pr_notice("<Usage> get hnat multicast table  : cat /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_MULTICAST);
+
+	pr_notice("<Usage> set debug level : echo 0 [level] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+
+	debug_level = level;
+	pr_notice("debug_level = %d\n", debug_level);
+
+	return 0;
+}
+
+int whitelist_set_usage(int level, char *ignore2, int ignore3)
+{
+	pr_notice("<Usage> get hnat whitelist table  : cat /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_WHITELIST);
+	pr_notice("<Usage> set hnat whitelist table  : echo 1 rax /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_WHITELIST);
+	pr_notice("<Usage> set debug level : echo 0 [level] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+
+	debug_level = level;
+	pr_notice("debug_level = %d\n", debug_level);
+
+	return 0;
+}
+
+int mib_set_usage(int level, char *ignore2)
+{
+	pr_notice("<Usage> set debug level : echo 0 [level] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_MIB);
+
+	pr_notice("<Usage> set which interface getting mib: echo 1 [rax] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_MIB);
+
+	pr_notice("<Usage> set entry index getting mib: echo 2 [entry_idx] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_MIB);
+
+	pr_notice("<Usage> set mib counter enable/disable: echo 3 [disable:0/enable:1] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_MIB);
+
+	pr_notice("<Usage> set accounting group getting mib: echo 4 [ac_grp] > /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_MIB);
+
+	debug_level = level;
+	pr_notice("debug_level = %d\n", debug_level);
+
+	return 0;
+}
+
+int type_set_usage(int level, char *ignore2, int ignore3)
+{
+	pr_notice("<Usage> get hnat type table  : cat /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_TYPE);
+	pr_notice("<Usage> set hnat interface SW fast : echo 1 rax /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_TYPE);
+	pr_notice("<Usage> set hnat interface HW fast : echo 2 rax /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_TYPE);
+	pr_notice("<Usage> set debug level : echo 0 [level] >  /proc/%s/%s\n",
+		HNAT_PROCREG_DIR, PROCREG_PPE_SETTING);
+
+	debug_level = level;
+	pr_notice("debug_level = %d\n", debug_level);
+
+	return 0;
+}
+
+int qos_set_usage(int fqos, int ignore2, int ignore3)
+{
+	set_fqos = fqos;
+	pr_notice("set_fqos = %d\n", set_fqos);
+
+	return 0;
+}
+
+int whitelist_set_if(int level, char *interface, int ignore3)
+{
+	struct net_device *dev;
+
+	dev = dev_get_by_name(&init_net, interface);
+	if (!dev) {
+		pr_notice("interface %s not found\n", interface);
+		return -ENODEV;
+	}
+	ppe_dev_reg_handler(dev);
+
+	return 0;
+}
+
+int mib_set_if(int ignore, char *interface)
+{
+	struct net_device *dev;
+	struct rtnl_link_stats64 stats = {0};
+	int ret;
+
+	dev = dev_get_by_name(&init_net, interface);
+	if (!dev) {
+		pr_notice("interface %s not found\n", interface);
+		return -ENODEV;
+	}
+
+	ret = ppe_get_dev_stats_handler(dev, &stats);
+	dev_put(dev);
+
+	if (ret == 1) { /* success */
+		pr_notice("interface %s\n", interface);
+		pr_notice("tx: %llu pkt, %llu bytes\n", stats.tx_packets, stats.tx_bytes);
+		pr_notice("rx: %llu pkt, %llu bytes\n", stats.rx_packets, stats.rx_bytes);
+	} else {
+		pr_notice("%s, interface %s, get mib error!\n", __func__, interface);
+	}
+	return 0;
+}
+
+int type_set_sw(int level, char *interface, int ignore3)
+{
+	struct net_device *dev;
+	int i;
+
+	dev = dev_get_by_name(&init_net, interface);
+	if (!dev)
+		return -ENODEV;
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			dst_port_type[i] = 1;
+			pr_notice("set %s software fast path\n", interface);
+			break;
+		}
+	}
+	dev_put(dev);
+	return 0;
+}
+
+int type_set_hw(int level, char *interface, int ignore3)
+{
+	struct net_device *dev;
+	int i;
+
+	dev = dev_get_by_name(&init_net, interface);
+	if (!dev)
+		return -ENODEV;
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] == dev) {
+			dst_port_type[i] = 0;
+			pr_notice("set %s hardware acceleration\n", interface);
+			break;
+		}
+	}
+	dev_put(dev);
+	return 0;
+}
+
+int whitelist_del_if(int level, char *interface, int ignore3)
+{
+	struct net_device *dev;
+
+	dev = dev_get_by_name(&init_net, interface);
+	if (!dev) {
+		pr_notice("interface %s not found\n", interface);
+		return -ENODEV;
+	}
+	ppe_dev_unreg_handler(dev);
+
+	return 0;
+}
+
+int mib_set_idx(int entry_num, char *ignore)
+{
+	unsigned long tx_pkt_cnt;
+	unsigned long tx_byte_cnt;
+
+	ppe_mib_dump(entry_num, &tx_pkt_cnt, &tx_byte_cnt);
+
+	return 0;
+}
+
+int mib_set_counter(int mib_counter, char *ignore)
+{
+	ppe_mib_counter_en = mib_counter;
+	pr_notice("\nppe_mib_counter_en = %d\n", ppe_mib_counter_en);
+
+	return 0;
+}
+
+int mib_set_agcnt(int agcnt, char *ignore)
+{
+	struct hwnat_ac_args args = {0};
+
+	args.ag_index = agcnt;
+
+	ppe_get_agcnt(&args);
+
+	pr_notice("\n%s, acnt:%d, packet cnt:%lld, byte cnt: %lld\n",
+		__func__, agcnt, args.ag_pkt_cnt, args.ag_byte_cnt);
+
+	return 0;
+}
+
+int binding_threshold(int threshold, int ignore1, int ignore2)
+{
+	pr_notice("Binding Threshold =%d\n", threshold);
+	reg_write(PPE_FOE_BNDR, threshold);
+	return 0;
+}
+
+int bind_life_time(int tcp_life, int udp_life, int fin_life)
+{
+	pr_notice("tcp_life = %d, udp_life = %d, fin_life = %d\n",
+		tcp_life, udp_life, fin_life);
+	ppe_set_bind_lifetime(tcp_life, udp_life, fin_life);
+	return 0;
+}
+
+int keep_alive_interval(int tcp_interval, int udp_interval, int ignore2)
+{
+	if (tcp_interval > 255 || udp_interval > 255) {
+		tcp_interval = 255;
+		udp_interval = 255;
+		pr_notice("TCP/UDP keep alive max interval = 255\n");
+	} else {
+		pr_notice("tcp_interval = %d, udp_interval = %d\n",
+			tcp_interval, udp_interval);
+	}
+
+	ppe_set_ka_interval(tcp_interval, udp_interval);
+	return 0;
+}
+
+int enable_464xlat(int enable, int ignore1, int ignore2)
+{
+	pr_notice("%s, enable = %d\n", __func__, enable);
+
+	ppe_set_464_enable(enable);
+	xlat_enable = enable;
+
+	return 0;
+}
+
+int hnat_cpu_reason(int cpu_reason, int ignore1, int ignore2)
+{
+	dbg_cpu_reason = cpu_reason;
+	pr_notice("show cpu reason = %d entry index = %d\n",
+		cpu_reason, hwnat_dbg_entry);
+	/* foe_dump_entry(hwnat_dbg_entry); */
+
+	return 0;
+}
+
+int entry_set_state(int state, int ignore1, int ignore2)
+{
+	dbg_entry_state = state;
+	pr_notice("ENTRY STATE = %s\n",
+		dbg_entry_state ==
+		0 ? "Invalid" : dbg_entry_state ==
+		1 ? "Unbind" : dbg_entry_state ==
+		2 ? "BIND" : dbg_entry_state ==   3 ?
+		"FIN" : "Unknown");
+	return 0;
+}
+
+int entry_detail(int index, int ignore1, int ignore2)
+{
+	struct foe_entry *entry = NULL;
+
+	entry = &ppe_foe_base[index];
+	foe_dump_entry(index, entry);
+	return 0;
+}
+
+int entry_delete(int index, int ignore1, int ignore2)
+{
+	pr_notice("delete entry idx = %d\n", index);
+	foe_del_entry_by_num(index);
+	return 0;
+}
+
+static const CPU_REASON_SET_FUNC hnat_set_func[] = {
+	[0] = hnat_set_usage,
+	[1] = hnat_cpu_reason,
+};
+
+static const ENTRY_SET_FUNC entry_set_func[] = {
+	[0] = entry_set_usage,
+	[1] = entry_set_state,
+	[2] = entry_detail,
+	[3] = entry_delete,
+};
+
+static const CR_SET_FUNC cr_set_func[] = {
+	[0] = cr_set_usage,
+	[1] = binding_threshold,
+	[2] = bind_life_time,
+	[3] = keep_alive_interval,
+	[4] = enable_464xlat,
+};
+//TBD multcast forward setting
+static const MULTICAST_SET_FUNC multicast_set_func[] = {
+	[0] = multicast_set_usage,
+};
+
+static const WHITELIST_SET_FUNC whitelist_set_func[] = {
+	[0] = whitelist_set_usage,
+	[1] = whitelist_set_if,
+	[2] = whitelist_del_if,
+};
+
+static const TYPE_SET_FUNC type_set_func[] = {
+	[0] = type_set_usage,
+	[1] = type_set_sw,
+	[2] = type_set_hw,
+};
+
+static const QOS_SET_FUNC qos_set_func[] = {
+	[0] = qos_set_usage,
+};
+
+static const MIB_SET_FUNC mib_set_func[] = {
+	[0] = mib_set_usage,
+	[1] = mib_set_if,
+	[2] = mib_set_idx,
+	[3] = mib_set_counter,
+	[4] = mib_set_agcnt
+};
+
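+/* Tokenize the user buffer with strsep(), convert the leading command id
+ * with kstrtol(), then hand the remaining arguments to the matching
+ * hnat_set_func[] entry.
+ */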
+ssize_t cpu_reason_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+	case 1:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(hnat_set_func) &&
+	    hnat_set_func[arg0]) {
+		(*hnat_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*hnat_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+ssize_t entry_write(struct file *file, const char __user *buffer,
+		    size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+	case 1:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		break;
+	case 2:
+	case 3:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg2 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg2);
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg3 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg3);
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(entry_set_func) &&
+	    entry_set_func[arg0]) {
+		(*entry_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*entry_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+ssize_t setting_write(struct file *file, const char __user *buffer,
+		      size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+	case 1:
+	case 4:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		break;
+
+	case 2:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg2 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg2);
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg3 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg3);
+		break;
+	case 3:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg2 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg2);
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(cr_set_func) &&
+	    cr_set_func[arg0]) {
+		(*cr_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*cr_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+ssize_t multicast_write(struct file *file, const char __user *buffer,
+			size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(multicast_set_func) &&
+	    multicast_set_func[arg0]) {
+		(*multicast_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*multicast_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+ssize_t whitelist_write(struct file *file, const char __user *buffer,
+			size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *arg2 = NULL;
+	char *p_delimiter = " \t";
+	char *p_delimiter1 = " \n";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		break;
+	case 1:
+	case 2:
+		p_token = strsep(&p_buf, p_delimiter1);
+		arg2 = p_token;
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(whitelist_set_func) &&
+	    whitelist_set_func[arg0]) {
+		(*whitelist_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*whitelist_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+ssize_t type_write(struct file *file, const char __user *buffer,
+			size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *arg2 = NULL;
+	char *p_delimiter = " \t";
+	char *p_delimiter1 = " \n";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		break;
+	case 1:
+	case 2:
+		p_token = strsep(&p_buf, p_delimiter1);
+		arg2 = p_token;
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(type_set_func) &&
+	    type_set_func[arg0]) {
+		(*type_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*type_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+ssize_t qos_write(struct file *file, const char __user *buffer,
+			size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
+	char *p_token = NULL;
+	char *p_delimiter = " \t";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg1 = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg1);
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(qos_set_func) &&
+	    qos_set_func[arg0]) {
+		(*qos_set_func[arg0]) (arg1, arg2, arg3);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*qos_set_func[0]) (0, 0, 0);
+	}
+
+	return len;
+}
+
+ssize_t mib_write(struct file *file, const char __user *buffer,
+			size_t count, loff_t *data)
+{
+	char buf[32];
+	char *p_buf;
+	int len = count;
+	long arg0 = 0, arg_long = 0;
+	char *p_token = NULL;
+	char *arg_char = NULL;
+	char *p_delimiter = " \t";
+	char *p_delimiter1 = " \n";
+	int ret;
+
+	if (len >= sizeof(buf)) {
+		pr_notice("input handling fail!\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	pr_notice("write parameter data = %s\n\r", buf);
+
+	p_buf = buf;
+	p_token = strsep(&p_buf, p_delimiter);
+	if (!p_token)
+		arg0 = 0;
+	else
+		ret = kstrtol(p_token, 10, &arg0);
+
+	switch (arg0) {
+	case 0:
+	case 2:
+	case 3:
+	case 4:
+		p_token = strsep(&p_buf, p_delimiter);
+		if (!p_token)
+			arg_long = 0;
+		else
+			ret = kstrtol(p_token, 10, &arg_long);
+		break;
+	case 1:
+		p_token = strsep(&p_buf, p_delimiter1);
+		arg_char = p_token;
+		break;
+	}
+
+	if (arg0 >= 0 && arg0 < ARRAY_SIZE(mib_set_func) &&
+	    mib_set_func[arg0]) {
+		(*mib_set_func[arg0]) (arg_long, arg_char);
+	} else {
+		pr_notice("no handler defined for command id(0x%08lx)\n\r", arg0);
+		(*mib_set_func[0]) (0, 0);
+	}
+
+	return len;
+}
+
+ssize_t med_write(struct file *file, const char __user *buffer,
+			size_t count, loff_t *data)
+{
+	return 0;
+}
+
+ssize_t mdma_write(struct file *file, const char __user *buffer,
+			size_t count, loff_t *data)
+{
+	return 0;
+}
+
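+/* Any write whose first character is not '0' sets disabling_hwnat. */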
+ssize_t disabling_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *data)
+{
+	if (count > 0) {
+		char c;
+		int val;
+
+		if (get_user(c, buffer))
+			return -EFAULT;
+
+		val = (c != '0');
+
+		disabling_hwnat = val;
+	}
+
+	return count;
+}
+
+int cpu_reason_read(struct seq_file *seq, void *v)
+{
+	int i;
+
+	pr_notice("============ CPU REASON =========\n");
+	pr_notice("(2)IPv4(IPv6) TTL(hop limit) = %u\n",
+		dbg_cpu_reason_cnt[0]);
+	pr_notice("(3)Ipv4(IPv6) has option(extension) header = %u\n",
+		dbg_cpu_reason_cnt[1]);
+	pr_notice("(7)No flow is assigned = %u\n", dbg_cpu_reason_cnt[2]);
+	pr_notice("(8)IPv4 HNAT doesn't support IPv4 /w fragment = %u\n",
+		dbg_cpu_reason_cnt[3]);
+	pr_notice("(9)IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment = %u\n",
+		dbg_cpu_reason_cnt[4]);
+	pr_notice("(10)IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport = %u\n",
+		dbg_cpu_reason_cnt[5]);
+	pr_notice("(11)IPv6 5T-route/6RD can't find TCP/UDP sport/dport = %u\n",
+		dbg_cpu_reason_cnt[6]);
+	pr_notice("(12)Ingress packet is TCP fin/syn/rst = %u\n",
+		dbg_cpu_reason_cnt[7]);
+	pr_notice("(13)FOE Un-hit = %u\n", dbg_cpu_reason_cnt[8]);
+	pr_notice("(14)FOE Hit unbind = %u\n", dbg_cpu_reason_cnt[9]);
+	pr_notice("(15)FOE Hit unbind & rate reach = %u\n", dbg_cpu_reason_cnt[10]);
+	pr_notice("(16)Hit bind PPE TCP FIN entry = %u\n", dbg_cpu_reason_cnt[11]);
+	pr_notice("(17)Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hot limit) - 1 = %u\n",
+		dbg_cpu_reason_cnt[12]);
+	pr_notice("(18)Hit bind and VLAN replacement violation = %u\n",
+		dbg_cpu_reason_cnt[13]);
+	pr_notice("(19)Hit bind and keep alive with unicast old-header packet = %u\n",
+		dbg_cpu_reason_cnt[14]);
+	pr_notice("(20)Hit bind and keep alive with multicast new-header packet = %u\n",
+		dbg_cpu_reason_cnt[15]);
+	pr_notice("(21)Hit bind and keep alive with duplicate old-header packet = %u\n",
+		dbg_cpu_reason_cnt[16]);
+	pr_notice("(22)FOE Hit bind & force to CPU = %u\n", dbg_cpu_reason_cnt[17]);
+	pr_notice("(28)Hit bind and exceed MTU =%u\n", dbg_cpu_reason_cnt[18]);
+	pr_notice("(24)Hit bind multicast packet to CPU = %u\n",
+		dbg_cpu_reason_cnt[19]);
+	pr_notice("(25)Hit bind multicast packet to GMAC & CPU = %u\n",
+		dbg_cpu_reason_cnt[20]);
+	pr_notice("(26)Pre bind = %u\n", dbg_cpu_reason_cnt[21]);
+
+	for (i = 0; i < 22; i++)
+		dbg_cpu_reason_cnt[i] = 0;
+	return 0;
+}
+
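+/* Walk the FOE tables of both PPEs and dump every entry whose state
+ * matches dbg_entry_state.
+ */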
+int entry_read(struct seq_file *seq, void *v)
+{
+	struct foe_entry *entry;
+	struct foe_entry *entry1;
+	int hash_index;
+	int cnt;
+
+	cnt = 0;
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		entry = &ppe_foe_base[hash_index];
+		if (entry->bfib1.state == dbg_entry_state) {
+			cnt++;
+			dbg_dump_entry(hash_index, entry);
+		}
+	}
+	pr_notice("PPE0 Total State = %s cnt = %d\n",
+		dbg_entry_state ==
+		0 ? "Invalid" : dbg_entry_state ==
+		1 ? "Unbind" : dbg_entry_state ==
+		2 ? "BIND" : dbg_entry_state ==   3 ? "FIN" : "Unknown", cnt);
+
+	cnt = 0;
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		entry1 = &ppe1_foe_base[hash_index];
+		if (entry1->bfib1.state == dbg_entry_state) {
+			cnt++;
+			dbg_dump_entry(hash_index, entry1);
+		}
+	}
+	pr_notice("PPE1 Total State = %s cnt = %d\n",
+		dbg_entry_state ==
+		0 ? "Invalid" : dbg_entry_state ==
+		1 ? "Unbind" : dbg_entry_state ==
+		2 ? "BIND" : dbg_entry_state ==   3 ? "FIN" : "Unknown", cnt);
+	return 0;
+}
+
+int setting_read(struct seq_file *seq, void *v)
+{
+	dbg_dump_cr();
+	return 0;
+}
+
+int multicast_read(struct seq_file *seq, void *v)
+{
+	foe_mcast_entry_dump();
+	return 0;
+}
+
+int whitelist_read(struct seq_file *seq, void *v)
+{
+	dump_dport();
+	return 0;
+}
+
+int type_read(struct seq_file *seq, void *v)
+{
+	int i;
+
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] != NULL)
+			pr_notice("%s -> %s\n", dst_port[i]->name,
+				dst_port_type[i] == 0 ? "HW_fast" :
+				dst_port_type[i] == 1 ? "SW_fast" : "Unknown");
+	}
+	return 0;
+}
+
+int qos_read(struct seq_file *seq, void *v)
+{
+	pr_notice("support hw qos = %d\n", set_fqos);
+
+	return 0;
+}
+
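+/*
+ * Aggregate the per-entry MIB counters of both PPEs into per-interface
+ * totals: RX is attributed to each entry's source port, TX to its
+ * destination port, then one summary is printed per registered interface.
+ */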
+int mib_read(struct seq_file *seq, void *v)
+{
+	u8 fport, sport, fport1, sport1, i;
+	int hash_index;
+	struct foe_entry *entry;
+	struct foe_entry *entry1;
+	unsigned long pkt_cnt, byte_cnt, pkt_cnt1, byte_cnt1;
+
+	for (i = 0; i < MAX_IF_NUM; i++)
+		hnat_if[i].dev = dst_port[i];
+
+	pkt_cnt = byte_cnt = 0;
+	pkt_cnt1 = byte_cnt1 = 0;
+
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		entry = &ppe_foe_base[hash_index];
+		entry1 = &ppe1_foe_base[hash_index];
+
+		fport = get_act_dp(entry);
+		sport = get_rxif_idx(entry);
+
+		fport1 = get_act_dp(entry1);
+		sport1 = get_rxif_idx(entry1);
+
+		ppe_mib_dump_ppe0(hash_index, &pkt_cnt, &byte_cnt);
+		ppe_mib_dump_ppe1(hash_index, &pkt_cnt1, &byte_cnt1);
+
+		hnat_if[sport].rx_byte_cnt += byte_cnt;
+		hnat_if[fport].tx_byte_cnt += byte_cnt;
+
+		hnat_if[sport].rx_pkt_cnt += pkt_cnt;
+		hnat_if[fport].tx_pkt_cnt += pkt_cnt;
+
+		hnat_if[sport1].rx_byte_cnt += byte_cnt1;
+		hnat_if[fport1].tx_byte_cnt += byte_cnt1;
+
+		hnat_if[sport1].rx_pkt_cnt += pkt_cnt1;
+		hnat_if[fport1].tx_pkt_cnt += pkt_cnt1;
+	}
+
+	for (i = 0; i < 16; i++) {
+		if((hnat_if[i].dev) != NULL) {
+			pr_notice("Interface : %s\n", hnat_if[i].dev->name);
+			pr_notice("Rx pkt cnt =%llu, Rx byte cnt=%llu\n", hnat_if[i].rx_pkt_cnt, hnat_if[i].rx_byte_cnt);
+			pr_notice("Tx pkt cnt =%llu, Tx byte cnt=%llu\n", hnat_if[i].tx_pkt_cnt, hnat_if[i].tx_byte_cnt);
+		}
+	}
+
+	return 0;
+}
+
+int med_read(struct seq_file *seq, void *v)
+{
+	struct MED_HNAT_INFO_HOST *med_dmad;
+	unsigned int widx, ridx, i, j;
+	u32 *p;
+
+	ridx = reg_read(MEDHW_SSR1_DST_RB0_RIDX);
+	widx = reg_read(MEDHW_SSR1_DST_RB0_WIDX);
+	pr_notice("widx = %x, ridx = %x\n", widx, ridx);
+	for (i = 0; i < MED_INFO_SIZE; i++) {
+		/* dump the first two words of the i-th host info entry */
+		med_dmad = &med_info_base[i];
+		p = (u32 *)med_dmad;
+		pr_notice("********** HNAT_INFO_HOST(%d)*********\n", i);
+		for (j = 0; j < 2; j++)
+			pr_notice("%02d: %08X\n", j, *(p + j));
+	}
+
+	return 0;
+}
+
+
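+/*
+ * Dump the MDMA TX/RX descriptor rings read back from the ring base
+ * registers.  Each descriptor is printed as eight 32-bit words, which
+ * assumes a 32-byte descriptor layout.
+ */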
+int mdma_read(struct seq_file *seq, void *v)
+{
+	struct MDMA_txdmad *tx_ring;
+	struct MDMA_rxdmad *rx_ring;
+	int i, j;
+	u32 tx_size = 0;
+	u32 rx_size = 0;
+	void __iomem *mdma_base;
+	void __iomem *mdma_base_rx;
+	u32 *p;
+
+	tx_size = reg_read(MDMA_TX_MAX_CNT_0);
+	rx_size = reg_read(MDMA_RX_MAX_CNT_0);
+	mdma_base = ioremap(reg_read(MDMA_TX_BASE_PTR_0), 0x1000);
+	mdma_base_rx = ioremap(reg_read(MDMA_RX_BASE_PTR_0), 0x1000);
+	if (!mdma_base || !mdma_base_rx) {
+		if (mdma_base)
+			iounmap(mdma_base);
+		if (mdma_base_rx)
+			iounmap(mdma_base_rx);
+		return -ENOMEM;
+	}
+	tx_ring = mdma_base;
+	rx_ring = mdma_base_rx;
+	p = (u32 *)tx_ring;
+	seq_printf(seq, "!!!!!!!!! MDMA TX_RING !!!!!!!\n");
+	for (i = 0; i < tx_size; i++) {
+		/* each descriptor is dumped as eight 32-bit words */
+		for (j = 0; j < 8; j++)
+			seq_printf(seq, "tx idx (%d)%02d: %08X\n",
+				   i, j, *(p + i * 8 + j));
+	}
+	p = (u32 *)rx_ring;
+	seq_printf(seq, "!!!!!!!!! MDMA RX_RING !!!!!!!\n");
+	for (i = 0; i < rx_size; i++) {
+		for (j = 0; j < 8; j++)
+			seq_printf(seq, "rx idx (%d)%02d: %08X\n",
+				   i, j, *(p + i * 8 + j));
+	}
+
+	iounmap(mdma_base);
+	iounmap(mdma_base_rx);
+
+	return 0;
+}
+
+int disabling_read(struct seq_file *seq, void *v)
+{
+	seq_printf(seq, "disabling_hwnat=%d\n", disabling_hwnat);
+	return 0;
+}
+
+static int cpu_reason_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cpu_reason_read, NULL);
+}
+
+static int entry_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, entry_read, NULL);
+}
+
+static int setting_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, setting_read, NULL);
+}
+
+static int multicast_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, multicast_read, NULL);
+}
+
+static int whitelist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, whitelist_read, NULL);
+}
+
+static int type_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, type_read, NULL);
+}
+
+static int qos_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qos_read, NULL);
+}
+
+static int mib_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mib_read, NULL);
+}
+
+static int med_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, med_read, NULL);
+}
+
+static int mdma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mdma_read, NULL);
+}
+
+static int disabling_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, disabling_read, NULL);
+}
+
+static const struct file_operations cpu_reason_fops = {
+	.owner = THIS_MODULE,
+	.open = cpu_reason_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = cpu_reason_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_entry_fops = {
+	.owner = THIS_MODULE,
+	.open = entry_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = entry_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_setting_fops = {
+	.owner = THIS_MODULE,
+	.open = setting_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = setting_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_multicast_fops = {
+	.owner = THIS_MODULE,
+	.open = multicast_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = multicast_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_whitelist_fops = {
+	.owner = THIS_MODULE,
+	.open = whitelist_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = whitelist_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_type_fops = {
+	.owner = THIS_MODULE,
+	.open = type_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = type_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_qos_fops = {
+	.owner = THIS_MODULE,
+	.open = qos_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = qos_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_mib_fops = {
+	.owner = THIS_MODULE,
+	.open = mib_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = mib_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_med_fops = {
+	.owner = THIS_MODULE,
+	.open = med_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = med_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_mdma_fops = {
+	.owner = THIS_MODULE,
+	.open = mdma_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = mdma_write,
+	.release = single_release
+};
+
+static const struct file_operations hnat_disabling_fops = {
+	.owner = THIS_MODULE,
+	.open = disabling_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = disabling_write,
+	.release = single_release
+};
+
+int hnat_debug_proc_init(void)
+{
+	if (!hnat_proc_reg_dir)
+		hnat_proc_reg_dir = proc_mkdir(HNAT_PROCREG_DIR, NULL);
+
+	proc_cpu_reason = proc_create(PROCREG_CPU_REASON, 0,
+				      hnat_proc_reg_dir, &cpu_reason_fops);
+	if (!proc_cpu_reason)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_CPU_REASON);
+
+	proc_hnat_entry = proc_create(PROCREG_PPE_ENTRY, 0,
+				      hnat_proc_reg_dir, &hnat_entry_fops);
+	if (!proc_hnat_entry)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_ENTRY);
+
+	proc_hnat_setting = proc_create(PROCREG_PPE_SETTING, 0,
+					hnat_proc_reg_dir, &hnat_setting_fops);
+	if (!proc_hnat_setting)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_SETTING);
+
+	proc_hnat_multicast = proc_create(PROCREG_PPE_MULTICAST, 0,
+					  hnat_proc_reg_dir, &hnat_multicast_fops);
+	if (!proc_hnat_multicast)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_MULTICAST);
+
+	proc_hnat_whitelist = proc_create(PROCREG_PPE_WHITELIST, 0,
+					  hnat_proc_reg_dir, &hnat_whitelist_fops);
+	if (!proc_hnat_whitelist)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_WHITELIST);
+
+	proc_hnat_type = proc_create(PROCREG_PPE_TYPE, 0,
+					  hnat_proc_reg_dir, &hnat_type_fops);
+	if (!proc_hnat_type)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_TYPE);
+
+	proc_hnat_qos = proc_create(PROCREG_PPE_QOS, 0,
+					  hnat_proc_reg_dir, &hnat_qos_fops);
+	if (!proc_hnat_qos)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_QOS);
+
+	proc_hnat_mib = proc_create(PROCREG_PPE_MIB, 0,
+					  hnat_proc_reg_dir, &hnat_mib_fops);
+	if (!proc_hnat_mib)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_MIB);
+
+	proc_hnat_med = proc_create(PROCREG_PPE_MED, 0,
+					  hnat_proc_reg_dir, &hnat_med_fops);
+	if (!proc_hnat_med)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_MED);
+
+	proc_hnat_mdma = proc_create(PROCREG_PPE_MDMA, 0,
+					  hnat_proc_reg_dir, &hnat_mdma_fops);
+	if (!proc_hnat_mdma)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_PPE_MDMA);
+
+	proc_hnat_disabling_hwnat = proc_create(PROCREG_DISABLING_HWNAT, 0,
+					  hnat_proc_reg_dir, &hnat_disabling_fops);
+	if (!proc_hnat_disabling_hwnat)
+		pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DISABLING_HWNAT);
+	disabling_hwnat = 0;
+
+	return 0;
+}
+
+void hnat_debug_proc_exit(void)
+{
+	pr_notice("proc exit\n");
+	if (proc_cpu_reason)
+		remove_proc_entry(PROCREG_CPU_REASON, hnat_proc_reg_dir);
+
+	if (proc_hnat_entry)
+		remove_proc_entry(PROCREG_PPE_ENTRY, hnat_proc_reg_dir);
+
+	if (proc_hnat_setting)
+		remove_proc_entry(PROCREG_PPE_SETTING, hnat_proc_reg_dir);
+
+	if (proc_hnat_multicast)
+		remove_proc_entry(PROCREG_PPE_MULTICAST, hnat_proc_reg_dir);
+
+	if (proc_hnat_whitelist)
+		remove_proc_entry(PROCREG_PPE_WHITELIST, hnat_proc_reg_dir);
+
+	if (proc_hnat_type)
+		remove_proc_entry(PROCREG_PPE_TYPE, hnat_proc_reg_dir);
+
+	if (proc_hnat_qos)
+		remove_proc_entry(PROCREG_PPE_QOS, hnat_proc_reg_dir);
+
+	if (proc_hnat_mib)
+		remove_proc_entry(PROCREG_PPE_MIB, hnat_proc_reg_dir);
+
+	if (proc_hnat_med)
+		remove_proc_entry(PROCREG_PPE_MED, hnat_proc_reg_dir);
+
+	if (proc_hnat_mdma)
+		remove_proc_entry(PROCREG_PPE_MDMA, hnat_proc_reg_dir);
+
+	if (proc_hnat_disabling_hwnat)
+		remove_proc_entry(PROCREG_DISABLING_HWNAT, hnat_proc_reg_dir);
+
+	if (hnat_proc_reg_dir)
+		remove_proc_entry(HNAT_PROCREG_DIR, NULL);
+}
+
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_proc.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_proc.h
new file mode 100644
index 0000000..b0d5414
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_dbg_proc.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef HNAT_DBG_PROC_H
+#define HNAT_DBG_PROC_H
+
+#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+#define HNAT_PROCREG_DIR	"hnat"
+#define PROCREG_CPU_REASON	"cpu_reason"
+#define PROCREG_PPE_ENTRY	"hnat_entry"
+#define PROCREG_PPE_SETTING	"hnat_setting"
+#define PROCREG_PPE_MULTICAST	"hnat_multicast"
+#define PROCREG_PPE_WHITELIST	"hnat_whitelist"
+#define PROCREG_PPE_TYPE	"hnat_type"
+#define PROCREG_PPE_QOS		"hnat_qos"
+#define PROCREG_PPE_MIB		"hnat_mib"
+#define PROCREG_PPE_MED		"hnat_med"
+#define PROCREG_PPE_MDMA	"hnat_mdma"
+#define PROCREG_DISABLING_HWNAT	"disabling_hwnat"
+
+extern unsigned int dbg_cpu_reason_cnt[32];
+extern int hwnat_dbg_entry;
+extern struct foe_entry *ppe_foe_base;
+extern struct foe_entry *ppe1_foe_base;
+
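+/* per-interface packet/byte totals accumulated by the hnat_mib proc read */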
+struct hwnat_interface {
+	struct net_device *dev;
+	unsigned long long rx_byte_cnt;
+	unsigned long long rx_pkt_cnt;
+	unsigned long long tx_byte_cnt;
+	unsigned long long tx_pkt_cnt;
+	unsigned long long rx_mcast_cnt;
+};
+
+int hnat_debug_proc_init(void);
+void hnat_debug_proc_exit(void);
+void dbg_dump_entry(uint32_t index, struct foe_entry *entry);
+void dbg_dump_cr(void);
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_define.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_define.h
new file mode 100644
index 0000000..9729bf1
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_define.h
@@ -0,0 +1,159 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef _HNAT_DEFINE_WANTED
+#define _HNAT_DEFINE_WANTED
+
+#include "frame_engine.h"
+#include "ra_nat.h"
+
+extern unsigned int hnat_chip_name;
+extern unsigned int fe_feature;
+extern struct foe_entry *ppe_foe_base;
+
+#if defined(CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT)
+extern struct MED_HNAT_INFO_HOST *med_info_base;
+#endif
+
+#if defined(CONFIG_RA_HW_NAT_PACKET_SAMPLING)
+extern struct ps_entry *ppe_ps_base;
+#endif
+
+extern u8 USE_3T_UDP_FRAG;
+extern int DP_GMAC1;
+extern int DP_GMAC2;
+
+extern dev_t dev_hnat;
+extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
+extern int (*ppe_hook_rx_wifi)(struct sk_buff *skb);
+extern int (*ppe_hook_tx_wifi)(struct sk_buff *skb, int gmac_no);
+extern int (*ppe_hook_rx_modem)(struct sk_buff *skb, u8 drop, u8 channel);
+extern int (*ppe_hook_tx_modem)(struct sk_buff *skb, u32 net_type, u32 channel_id);
+extern int (*ppe_hook_rx_rndis)(struct sk_buff *skb);
+extern int (*ppe_hook_tx_rndis)(struct sk_buff *skb);
+extern int (*ppe_hook_rx_eth)(struct sk_buff *skb);
+extern int (*ppe_hook_tx_eth)(struct sk_buff *skb, int gmac_no);
+extern int (*ppe_hook_rx_ext)(struct sk_buff *skb);
+extern int (*ppe_hook_tx_ext)(struct sk_buff *skb, int gmac_no);
+extern void (*ppe_dev_register_hook)(struct net_device *dev);
+extern void (*ppe_dev_unregister_hook)(struct net_device *dev);
+extern int (*ppe_get_dev_stats)(struct net_device *dev, struct rtnl_link_stats64 *storage);
+
+/* HOOK ID */
+#define HWNAT_HOOK_ID_ETH 0
+#define HWNAT_HOOK_ID_MODEM 1
+#define HWNAT_HOOK_ID_WIFI 2
+#define HWNAT_HOOK_ID_RNDIS 3
+#define HWNAT_HOOK_ID_EXT 4
+
+/* DIR ID */
+#define HWNAT_DIR_ID_ALL 0
+#define HWNAT_DIR_ID_RX 1
+#define HWNAT_DIR_ID_TX 2
+
+extern u8 bind_dir;
+extern u16 wan_vid;
+extern u16 lan_vid;
+extern struct foe_entry *ppe_virt_foe_base_tmp;
+extern struct foe_entry *ppe1_virt_foe_base_tmp;
+#if defined(CONFIG_RAETH_QDMA)
+extern unsigned int M2Q_table[64];
+extern unsigned int lan_wan_separate;
+#endif
+extern struct hwnat_ac_args ac_info[64];
+extern u32 debug_level;
+extern u8 set_fqos;
+extern u8 xlat_enable;
+
+extern struct net_device *dst_port[MAX_IF_NUM];
+extern u8 dst_port_type[MAX_IF_NUM];
+extern struct ps_entry *ppe_ps_base;
+/*extern struct pkt_parse_result ppe_parse_result;*/
+extern int dbg_cpu_reason;
+
+#ifdef CONFIG_SUPPORT_OPENWRT
+#define DEV_NAME_HNAT_LAN	"eth0"
+#define DEV_NAME_HNAT_WAN	"eth1"
+#else
+#define DEV_NAME_HNAT_LAN	"eth2"
+#define DEV_NAME_HNAT_WAN	"eth3"
+#endif
+
+#define DEV_NAME_HNAT_EDMA0	"edma0"
+#define DEV_NAME_HNAT_EDMA1	"edma1"
+#define DEV_NAME_HNAT_CCCI0	"ccmni0"
+#define DEV_NAME_HNAT_CCCI1	"ccmni1"
+#define DEV_NAME_HNAT_CCCI2	"ccmni2"
+#define DEV_NAME_HNAT_CCCI3	"ccmni3"
+#define DEV_NAME_HNAT_CCCI4	"ccmni4"
+#define DEV_NAME_HNAT_CCCI5	"ccmni5"
+#define DEV_NAME_HNAT_CCCI6	"ccmni6"
+#define DEV_NAME_HNAT_CCCI7	"ccmni7"
+#define DEV_NAME_HNAT_RA0	"ra0"
+#define DEV_NAME_HNAT_RAI0	"rai0"
+#define DEV_NAME_HNAT_RAX0	"rax0"
+#define DEV_NAME_HNAT_APCLI0	"apcli0"
+#define DEV_NAME_HNAT_APCLI1	"apcli1"
+#define DEV_NAME_HNAT_RNDIS0	"rndis0"
+
+/* EDMA */
+#define DEV_NAME        DEV_NAME_HNAT_EDMA0
+#define DEV2_NAME       DEV_NAME_HNAT_EDMA1
+
+#define AQR_DEV_NAME        "aqr0"
+#define AQR_DEV2_NAME       "aqr1"
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_ioctl.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_ioctl.h
new file mode 100755
index 0000000..0d2779e
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_ioctl.h
@@ -0,0 +1,280 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef	__HW_NAT_IOCTL_H__
+#define	__HW_NAT_IOCTL_H__
+
+#include "hnat_config.h"
+
+extern unsigned int hnat_chip_name;
+extern unsigned int fe_feature;
+extern struct net_device	*dst_port[64];
+extern unsigned int debug_PPP;
+
+#define HW_NAT_DUMP_CACHE_ENTRY         (0x24)
+#define HW_NAT_ADD_ENTRY		(0x01)
+#define HW_NAT_DEL_ENTRY		(0x38)
+#define HW_NAT_DUMP_ENTRY		(0x03)
+#define HW_NAT_GET_ALL_ENTRIES		(0x04)
+#define HW_NAT_BIND_ENTRY		(0x05)
+#define HW_NAT_UNBIND_ENTRY		(0x06)
+#define HW_NAT_INVALID_ENTRY		(0x07)
+#define HW_NAT_DEBUG			(0x08)
+
+#define HW_NAT_DROP_ENTRY		(0x36)
+#define HW_NAT_TBL_CLEAR		(0x37)
+
+#define HW_NAT_GET_AC_CNT		(0x09)
+#define HW_NAT_BIND_THRESHOLD		(0x16)
+#define HW_NAT_MAX_ENTRY_LMT		(0x17)
+#define HW_NAT_RULE_SIZE		(0x18)
+#define HW_NAT_KA_INTERVAL		(0x19)
+#define HW_NAT_UB_LIFETIME		(0x1A)
+#define HW_NAT_BIND_LIFETIME		(0x1B)
+#define HW_NAT_BIND_DIRECTION		(0x1C)
+#define HW_NAT_VLAN_ID			(0x1D)
+#define HW_NAT_MCAST_INS		(0x20)
+#define HW_NAT_MCAST_DEL		(0x21)
+#define HW_NAT_MCAST_DUMP		(0x22)
+#define HW_NAT_MIB_DUMP			(0x23)
+#define HW_NAT_MIB_DRAM_DUMP            (0x25)
+#define HW_NAT_MIB_GET			(0x26)
+#define HW_NAT_MIB_GET_ALL_IP	(0x27)
+#define HW_NAT_IPI_CTRL_FROM_EXTIF			(0x50)
+#define HW_NAT_IPI_CTRL_FROM_PPEHIT			(0x51)
+#define HW_NAT_DPORT			(0x52)
+#define HW_NAT_CLEAR_HOOK		(0x53)
+#define HW_NAT_RESTORE_HOOK		(0x54)
+
+#define HW_NAT_DEVNAME			"hwnat0"
+#define HW_NAT_MAJOR			(121)
+
+/* extern struct hwnat_ac_args ac_info[64]; */
+extern struct mib_entry *ppe_mib_base;
+enum hwnat_status {
+	HWNAT_SUCCESS = 0,
+	HWNAT_FAIL = 1,
+	HWNAT_ENTRY_NOT_FOUND = 2
+};
+
+struct hwnat_tuple {
+	unsigned short hash_index;
+	unsigned short rmt;
+	unsigned short frag;
+	unsigned short checksum;
+	unsigned short ttl;
+	unsigned short rxif_idx;
+	unsigned int pkt_type;
+	unsigned int etype;
+	unsigned int is_udp;
+	unsigned int fport;
+	unsigned int fqos;
+	unsigned int qid;
+	/* egress layer2 */
+	unsigned char dmac[6];
+	unsigned char smac[6];
+	unsigned short vlan1;
+	unsigned short vlan2;
+	unsigned short pppoe_id;
+
+	/* ingress layer3 */
+	unsigned int ing_sipv4;
+	unsigned int ing_dipv4;
+
+	unsigned int ing_sipv6_0;
+	unsigned int ing_sipv6_1;
+	unsigned int ing_sipv6_2;
+	unsigned int ing_sipv6_3;
+
+	unsigned int ing_dipv6_0;
+	unsigned int ing_dipv6_1;
+	unsigned int ing_dipv6_2;
+	unsigned int ing_dipv6_3;
+
+	/* egress layer3 */
+	unsigned int eg_sipv4;
+	unsigned int eg_dipv4;
+
+	unsigned int eg_sipv6_0;
+	unsigned int eg_sipv6_1;
+	unsigned int eg_sipv6_2;
+	unsigned int eg_sipv6_3;
+
+	unsigned int eg_dipv6_0;
+	unsigned int eg_dipv6_1;
+	unsigned int eg_dipv6_2;
+	unsigned int eg_dipv6_3;
+	unsigned char prot;
+	/*ingress layer4*/
+	unsigned short ing_sp;
+	unsigned short ing_dp;
+
+	/*egress layer4*/
+	unsigned short eg_sp;
+	unsigned short eg_dp;
+
+	unsigned char	ipv6_flowlabel;
+	unsigned char   pppoe_act;
+	unsigned int    vlan_layer;
+	unsigned char	dst_port;
+	unsigned int    dscp;
+	enum hwnat_status result;
+};
+
+struct hwnat_args {
+	enum hwnat_status result;
+	unsigned int entry_num;
+	unsigned int num_of_entries;
+	unsigned int debug;
+	unsigned int entry_state; /* invalid=0, unbind=1, bind=2, fin=3 */
+	struct hwnat_tuple entries[0];
+};
+
+/*hnat qos*/
+struct hwnat_qos_args {
+	unsigned int enable:1;
+	unsigned int up:3;
+	unsigned int weight:3;	/*UP resolution */
+	unsigned int dscp:6;
+	unsigned int dscp_set:3;
+	unsigned int vpri:3;
+	unsigned int ac:2;
+	unsigned int mode:2;
+	unsigned int weight0:4;	/*WRR 4 queue weight */
+	unsigned int weight1:4;
+	unsigned int weight2:4;
+	unsigned int weight3:4;
+	enum hwnat_status result;
+};
+
+/*hnat config*/
+struct hwnat_config_args {
+	unsigned int bind_threshold:16;
+	unsigned int foe_full_lmt:14;
+	unsigned int foe_half_lmt:14;
+	unsigned int foe_qut_lmt:14;
+	unsigned int pre_acl:9;
+	unsigned int pre_meter:9;
+	unsigned int pre_ac:9;
+	unsigned int post_meter:9;
+	unsigned int post_ac:9;
+	unsigned int foe_tcp_ka:8;	/*unit 4 sec */
+	unsigned int foe_udp_ka:8;	/*unit 4 sec */
+	unsigned int foe_unb_dlta:8;	/*unit 1 sec */
+	unsigned int foe_tcp_dlta:16;	/*unit 1 sec */
+	unsigned int foe_udp_dlta:16;	/*unit 1 sec */
+	unsigned int foe_fin_dlta:16;	/*unit 1 sec */
+	unsigned int wan_vid:16;
+	unsigned int lan_vid:16;
+	unsigned int bind_dir:2;	/* 0=upstream, 1=downstream, 2=bi-direction */
+	enum hwnat_status result;
+};
+
+struct hwnat_ac_args {
+	unsigned int ag_index;
+	unsigned long long ag_byte_cnt;
+	unsigned long long ag_pkt_cnt;
+	enum hwnat_status result;
+};
+
+extern struct hwnat_ac_args ac_info[64];
+
+struct hwnat_mcast_args {
+	unsigned int    mc_vid:16;
+	unsigned int    mc_px_en:4;
+	unsigned int    valid:1;
+	unsigned int    rev2:3;
+	unsigned int    mc_px_qos_en:4;
+	unsigned int    mc_qos_qid:4;
+	unsigned char	dst_mac[6];
+};
+
+struct hwnat_mib_args {
+	unsigned int    entry_num:16;
+};
+
+struct hwnat_ipi_args {
+	unsigned int hnat_ipi_enable;
+	unsigned int drop_pkt;
+	unsigned int queue_thresh;
+	unsigned int ipi_cnt_mod;
+};
+
+struct hwnat_mib_ip {
+	union {
+		unsigned int ipv4_addr;
+		unsigned int ipv6_addr[4];
+	} ip;
+	unsigned int is_ipv4;
+	unsigned long tx_bytes;
+	unsigned long rx_bytes;
+	unsigned long tx_packets;
+	unsigned long rx_packets;
+};
+
+#define HWNAT_MAX_MIB_IP_ENTRY_NUM (512)
+struct hwnat_mib_all_ip_args {
+	unsigned int entry_num;
+	struct hwnat_mib_ip entries[HWNAT_MAX_MIB_IP_ENTRY_NUM];
+};
+int get_ppe_mib_ip(struct hwnat_mib_all_ip_args *all_ip);
+int ppe_get_agcnt(struct hwnat_ac_args *opt3);
+int reply_entry_idx(struct hwnat_tuple *opt, unsigned int entry_num);
+void ppe_mib_dump(unsigned int entry_num, unsigned long *pkt_cnt, unsigned long *byte_cnt);
+void ppe_mib_dram_dump(unsigned int entry_num);
+int get_ppe_mib(struct hwnat_tuple *opt, unsigned long *tx_pkt_cnt, unsigned long *tx_byte_cnt,
+		unsigned long *rx_pkt_cnt, unsigned long *rx_byte_cnt);
+int ppe_tbl_clear(void);
+void dump_dport(void);
+int ppe_set_ka_interval(unsigned char tcp_ka, unsigned char udp_ka);
+int ppe_set_464_enable(int enable);
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_swdvfs.c b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_swdvfs.c
new file mode 100644
index 0000000..49f01a1
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hnat_swdvfs.c
@@ -0,0 +1,770 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2020 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2020 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+
+#include "frame_engine.h"
+#include "util.h"
+#include "hnat_config.h"
+
+/* #define DD printk("%s %d\n", __FUNCTION__, __LINE__); */
+
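+/*
+ * Per-OPP hysteresis windows: the driver only steps an OPP down when the
+ * load falls below its lower_* threshold and up when it exceeds its
+ * upper_* threshold; the ULLONG_MAX sentinel pins the highest OPP.
+ */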
+u64 lower_tput[] = {10, 30, 50, ULLONG_MAX};		/* Tput threshold */
+u64 upper_tput[] = {20, 40, 60, ULLONG_MAX};		/* in MBps */
+
+u64 lower_pcnt[] = { 80,  800, 30000, ULLONG_MAX};	/* packet cnt threshold */
+u64 upper_pcnt[] = {100, 1000, 40000, ULLONG_MAX};
+
+int vcoreopp_to_dramopp[] = {0, 2, 5, 6};	/* vcore to dram opp mapping tbl */
+
+#ifndef CONFIG_HW_NAT_SW_DVFS_DEFAULT_OPP
+#define CONFIG_HW_NAT_SW_DVFS_DEFAULT_OPP	3	/* the highest vcore */
+#endif
+#define RETRY_COUNT				100	/* in ms */
+
+static bool			dvfs_en = false;	/* disable by default */
+static int			force_opp = 0xFF;	/* invalid by default */
+
+void __iomem		*netsys_base;
+struct timer_list	dvfs_timer;
+int			dvfs_current_opp;
+int			dvfs_debug_level = 1;
+int			dvfs_timeout = 1;	/* unit: seconds (scaled by HZ when arming the timer) */
+
+u64 lower_tput_force[ARRAY_SIZE(lower_tput)];	/* debug override, in MBps */
+u64 upper_tput_force[ARRAY_SIZE(upper_tput)];
+u64 lower_pcnt_force[ARRAY_SIZE(lower_pcnt)];	/* debug override, packet cnt */
+u64 upper_pcnt_force[ARRAY_SIZE(upper_pcnt)];
+
+static int dvfs_print(const char *fmt, ...)
+{
+	char buf[256];
+	va_list args;
+
+	va_start(args, fmt);
+	if (dvfs_debug_level) {
+		vsnprintf(buf, sizeof(buf), fmt, args);
+		pr_info("%s", buf);
+	}
+	va_end(args);
+	return 0;
+}
+
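+/*
+ * Return the lowest OPP whose threshold still covers the measured value;
+ * the ULLONG_MAX sentinel at the top guarantees a match.
+ */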
+static int get_new_opp(u64 value, u64 *threshold, int count)
+{
+	int i;
+
+	/* favor lower opp */
+	for (i = 0; i < count; i++) {
+		if (threshold[i] >= value)
+			return i;
+	}
+	return i;
+}
+
+static u64 get_bytes_count(void)
+{
+	int i, ag_idx[] = {1, 2, 8, 9, 10};
+	unsigned long long bcnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ag_idx); i++) {
+		bcnt += reg_read(AC_BASE + ag_idx[i] * 16);
+		bcnt += ((unsigned long long)reg_read(AC_BASE + ag_idx[i] * 16 + 4)) << 32;
+	}
+
+	/* in MB; the caller divides by the sampling period to get MBps */
+	return (bcnt >> 20);
+}
+
+static u64 get_packet_count(void)
+{
+	int i, ag_idx[] = {1, 2, 8, 9, 10};
+	unsigned long long pcnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ag_idx); i++)
+		pcnt += reg_read(AC_BASE + ag_idx[i] * 16 + 8);
+
+	/* raw packet count */
+	return pcnt;
+}
+
+
+static int update_opp_by_tput(int present_opp)
+{
+	u64 tput = get_bytes_count() / dvfs_timeout;
+
+	dvfs_print("netsys dvfs: tput = %lld MBps\n", tput);
+
+	/* favor low opp */
+	if (tput < lower_tput[present_opp])
+		return get_new_opp(tput, &lower_tput[0], sizeof(lower_tput)/sizeof(u64));
+
+	if (tput > upper_tput[present_opp])
+		return get_new_opp(tput, &upper_tput[0], sizeof(upper_tput)/sizeof(u64));
+
+	return present_opp;
+}
+
+static int update_opp_by_pcnt(int present_opp)
+{
+	u64 pcnt = get_packet_count() / dvfs_timeout;
+
+	dvfs_print("netsys dvfs: packet = %lld\n", pcnt);
+
+	/* favor low opp */
+	if (pcnt < lower_pcnt[present_opp])
+		return get_new_opp(pcnt, &lower_pcnt[0], sizeof(lower_pcnt)/sizeof(u64));
+
+	if (pcnt > upper_pcnt[present_opp])
+		return get_new_opp(pcnt, &upper_pcnt[0], sizeof(upper_pcnt)/sizeof(u64));
+
+	return present_opp;
+}
+
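+/*
+ * Apply a new operating point through the NETSYS_DVFS_CFG1 request/ack
+ * handshake: write the vcore and DRAM-bandwidth OPP fields with the REQ
+ * bits set, poll (up to RETRY_COUNT ms) for the ACK bits to clear, then
+ * drop the REQ bits.
+ */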
+static int change_to_opp(int opp)
+{
+	u32 val;
+
+	int retry = 0;
+
+	if (opp >= sizeof(vcoreopp_to_dramopp)/sizeof(int)) {
+		pr_err("netsys swdvfs: sanity error\n");
+		return -1;
+	}
+
+	val = reg_read(NETSYS_DVFS_CFG1);
+	if (val & NETSYS_SW_VC_DVFS_ACK || val & NETSYS_SW_BW_DVFS_ACK) {
+		pr_err("netsys swdvfs: ack error\n");
+		return -1;
+	}
+
+	/* handle vcore */
+	val &= 0xFF8FFFFF;
+	val |= (((opp & 0x00000007) << NETSYS_SW_VC_DVFS_VAL_OFFSET)) | NETSYS_SW_VC_DVFS_REQ;
+
+	/* handle dram bw */
+	val &= 0x8FFFFFFF;
+	val |= (((vcoreopp_to_dramopp[opp] & 0x00000007) << NETSYS_SW_BW_DVFS_VAL_OFFSET)) | NETSYS_SW_BW_DVFS_REQ;
+
+	reg_write(NETSYS_DVFS_CFG1, val);
+
+	/* wait for ack */
+	val = reg_read(NETSYS_DVFS_CFG1);
+
+	while ((val & NETSYS_SW_VC_DVFS_ACK) ||
+		(val & NETSYS_SW_BW_DVFS_ACK) ) {
+		if (retry++ >= RETRY_COUNT) {
+			pr_err("netsys swdvfs: ack timeout\n");
+			return -1;
+		}
+
+		udelay(1000);
+		val = reg_read(NETSYS_DVFS_CFG1);
+	}
+
+	/* clear req */
+	val &= ~(NETSYS_SW_VC_DVFS_REQ | NETSYS_SW_BW_DVFS_REQ);
+	reg_write(NETSYS_DVFS_CFG1, val);
+
+	return 0;
+}
+
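+/*
+ * Periodic timer callback: unless an OPP is pinned via dvfs_force_opp, take
+ * the higher of the throughput vote and the packet-count vote, switch OPP
+ * on change, and re-arm the timer while DVFS remains enabled.
+ */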
+static void dvfs_do(struct timer_list *t)
+{
+	int latest_opp;
+
+	/* honor the /proc/hnat/dvfs_force_opp override; valid range is 0 to 3 */
+	if (force_opp >= 0 && force_opp < ARRAY_SIZE(upper_tput)) {
+		latest_opp = force_opp;
+	} else {
+		int latest_tput_opp = update_opp_by_tput(dvfs_current_opp);
+		int latest_pcnt_opp = update_opp_by_pcnt(dvfs_current_opp);
+
+		dvfs_print("netsys dvfs: latest_tput_opp = %d\n", latest_tput_opp);
+		dvfs_print("netsys dvfs: latest_pcnt_opp = %d\n", latest_pcnt_opp);
+
+		latest_opp = max(latest_tput_opp, latest_pcnt_opp);
+	}
+
+	dvfs_print("netsys dvfs: latest_opp = %d\n", latest_opp);
+
+	if (latest_opp != dvfs_current_opp) {
+		dvfs_print("netsys dvfs: old_opp:%d new_opp:%d \n", dvfs_current_opp, latest_opp);
+
+		dvfs_current_opp = latest_opp;
+
+		change_to_opp(dvfs_current_opp);
+	}
+
+	if (dvfs_en)
+		mod_timer(&dvfs_timer, jiffies + HZ * dvfs_timeout);	/* setup next timer */
+}
+
+static void dvfs_hw_enable(bool enable)
+{
+	u32 val;
+
+	if (enable) {
+		val = reg_read(NETSYS_DVFS_CFG1);
+		val |= NETSYS_SW_VC_DVFS_EN | NETSYS_SW_BW_DVFS_EN;
+		reg_write(NETSYS_DVFS_CFG1, val);
+
+		val = reg_read(NETSYS_DVFS_CFG0);
+		val |= NETSYS_DVFS_EN;
+		reg_write(NETSYS_DVFS_CFG0, val);
+	} else {
+		val = reg_read(NETSYS_DVFS_CFG0);
+		val &= ~NETSYS_DVFS_EN;
+		reg_write(NETSYS_DVFS_CFG0, val);
+
+		val = reg_read(NETSYS_DVFS_CFG1);
+		val &= ~(NETSYS_SW_VC_DVFS_EN | NETSYS_SW_BW_DVFS_EN);
+		reg_write(NETSYS_DVFS_CFG1, val);
+	}
+}
+
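+/*
+ * Copy the debug thresholds written through procfs over the defaults.  A
+ * non-zero upper_*_force[2] is the "all six values parsed" sentinel set by
+ * the threshold write handlers below.
+ */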
+static void change_threshold(void)
+{
+	if (upper_tput_force[2]) {
+		lower_tput[0] = lower_tput_force[0];
+		lower_tput[1] = lower_tput_force[1];
+		lower_tput[2] = lower_tput_force[2];
+		upper_tput[0] = upper_tput_force[0];
+		upper_tput[1] = upper_tput_force[1];
+		upper_tput[2] = upper_tput_force[2];
+	}
+
+	if (upper_pcnt_force[2]) {
+		lower_pcnt[0] = lower_pcnt_force[0];
+		lower_pcnt[1] = lower_pcnt_force[1];
+		lower_pcnt[2] = lower_pcnt_force[2];
+		upper_pcnt[0] = upper_pcnt_force[0];
+		upper_pcnt[1] = upper_pcnt_force[1];
+		upper_pcnt[2] = upper_pcnt_force[2];
+	}
+}
+
+/* only used in fini/init */
+static void backto_default_opp(void)
+{
+	dvfs_hw_enable(true);
+	dvfs_current_opp = CONFIG_HW_NAT_SW_DVFS_DEFAULT_OPP;
+	change_to_opp(CONFIG_HW_NAT_SW_DVFS_DEFAULT_OPP);
+	dvfs_hw_enable(false);
+}
+
+/* PROCFS */
+#define PROCREG_DVFS_EN			"dvfs_en"
+#define PROCREG_FORCE_OPP		"dvfs_force_opp"
+#define PROCREG_FORCE_TPUT_THRESHOLD	"dvfs_force_tput_thrsh"
+#define PROCREG_FORCE_PCNT_THRESHOLD	"dvfs_force_pcnt_thrsh"
+#define PROCREG_DVFS_TIMEOUT		"dvfs_timeout"
+#define PROCREG_DVFS_DEBUG_LEVEL	"dvfs_debug_level"
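+/*
+ * All nodes are created under the shared hnat proc directory, e.g.:
+ *
+ *   echo 1 > /proc/hnat/dvfs_en           # enable software DVFS
+ *   echo 2 > /proc/hnat/dvfs_force_opp    # pin OPP 2 (valid range 0..3)
+ *   echo 255 > /proc/hnat/dvfs_force_opp  # out-of-range value resumes
+ *                                           automatic OPP selection
+ *   echo "10 30 50 20 40 60" > /proc/hnat/dvfs_force_tput_thrsh
+ */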
+
+extern struct proc_dir_entry 	*hnat_proc_reg_dir;
+static struct proc_dir_entry 	*proc_force_opp;
+static struct proc_dir_entry 	*proc_force_tput_threshold;
+static struct proc_dir_entry 	*proc_force_pcnt_threshold;
+static struct proc_dir_entry 	*proc_dvfs_en;
+static struct proc_dir_entry 	*proc_dvfs_timeout;
+static struct proc_dir_entry 	*proc_dvfs_debug_level;
+
+static int dvfs_en_read(struct seq_file *seq, void *v)
+{
+	pr_info("dvfs_en=%d\n", dvfs_en);
+	return 0;
+}
+
+static int dvfs_en_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dvfs_en_read, NULL);
+}
+
+static ssize_t dvfs_en_write(struct file *file, const char __user *buffer, size_t count, loff_t *data)
+{
+	char buf[32];
+	int len = count;
+	long arg0 = 0;
+	int ret;
+
+	if (len >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	ret = kstrtol(buf, 10, &arg0);
+	if (ret)
+		return ret;
+
+	dvfs_en = arg0 ? true : false;
+
+	if (dvfs_en) {
+		change_threshold();	/* change threshold if possible */
+
+		dvfs_hw_enable(true);
+
+		/* start the timer for dvfs activity */
+		mod_timer(&dvfs_timer, jiffies + HZ * dvfs_timeout);
+	} else {
+		del_timer_sync(&dvfs_timer);
+
+		backto_default_opp();
+
+		dvfs_hw_enable(false);
+	}
+
+	return len;
+}
+
+static ssize_t dvfs_timeout_write(struct file *file, const char __user *buffer, size_t count, loff_t *data)
+{
+	char buf[32];
+	int len = count;
+	long arg0 = 0;
+	int ret;
+
+	if (len >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	ret = kstrtol(buf, 10, &arg0);
+	if (ret)
+		return ret;
+	dvfs_timeout = arg0;
+
+	/* MAX 10 secs. min 1 secs */
+	if (dvfs_timeout > 10)
+		dvfs_timeout = 10;
+	if (dvfs_timeout <= 0)
+		dvfs_timeout = 1;
+
+	return len;
+}
+
+static ssize_t dvfs_debug_level_write(struct file *file, const char __user *buffer, size_t count, loff_t *data)
+{
+	char buf[32];
+	int len = count;
+	long arg0 = 0;
+	int ret;
+
+	if (len >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	ret = kstrtol(buf, 10, &arg0);
+	if (ret)
+		return ret;
+	dvfs_debug_level = arg0 ? 1 : 0;
+
+	return len;
+}
+
+static int force_opp_read(struct seq_file *seq, void *v)
+{
+	pr_info("force_opp=%d\n", force_opp);	/* just simply printk */
+	return 0;
+}
+
+static int dvfs_debug_level_read(struct seq_file *seq, void *v)
+{
+	pr_info("dvfs_debug_level=%d\n", dvfs_debug_level);
+	return 0;
+}
+
+static int dvfs_timeout_read(struct seq_file *seq, void *v)
+{
+	pr_info("dvfs_timeout=%d\n", dvfs_timeout);
+	return 0;
+}
+
+static int force_tput_threshold_read(struct seq_file *seq, void *v)
+{
+	pr_info("as-is:\n");
+	pr_info("lower_tput:%lld %lld %lld\n", lower_tput[0], lower_tput[1], lower_tput[2]);
+	pr_info("upper_tput:%lld %lld %lld\n", upper_tput[0], upper_tput[1], upper_tput[2]);
+	pr_info("to-be:\n");
+	pr_info("lower_tput:%lld %lld %lld\n", lower_tput_force[0], lower_tput_force[1], lower_tput_force[2]);
+	pr_info("upper_tput:%lld %lld %lld\n", upper_tput_force[0], upper_tput_force[1], upper_tput_force[2]);
+
+	return 0;
+}
+
+
+static int force_pcnt_threshold_read(struct seq_file *seq, void *v)
+{
+	pr_info("as-is:\n");
+	pr_info("lower_pcnt:%lld %lld %lld\n", lower_pcnt[0], lower_pcnt[1], lower_pcnt[2]);
+	pr_info("upper_pcnt:%lld %lld %lld\n", upper_pcnt[0], upper_pcnt[1], upper_pcnt[2]);
+	pr_info("to-be:\n");
+	pr_info("lower_pcnt:%lld %lld %lld\n", lower_pcnt_force[0], lower_pcnt_force[1], lower_pcnt_force[2]);
+	pr_info("upper_pcnt:%lld %lld %lld\n", upper_pcnt_force[0], upper_pcnt_force[1], upper_pcnt_force[2]);
+
+	return 0;
+}
+
+ssize_t force_tput_threshold_write(struct file *file, const char __user *buffer, size_t count, loff_t *data)
+{
+	u64 *dst[] = {
+		&lower_tput_force[0], &lower_tput_force[1], &lower_tput_force[2],
+		&upper_tput_force[0], &upper_tput_force[1], &upper_tput_force[2],
+	};
+	char buf[64];
+	int len = count;
+	long arg0 = 0;
+	char *token, *cur = buf;
+	int i;
+
+	if (len >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	/* invalidate the override until all six values have been parsed */
+	upper_tput_force[2] = 0;
+
+	/* expect "lo0 lo1 lo2 up0 up1 up2", space separated */
+	for (i = 0; i < ARRAY_SIZE(dst); i++) {
+		token = strsep(&cur, " ");
+		if (!token || kstrtol(token, 10, &arg0))
+			break;
+		*dst[i] = arg0;
+	}
+
+	return len;
+}
+
+ssize_t force_pcnt_threshold_write(struct file *file, const char __user *buffer, size_t count, loff_t *data)
+{
+	u64 *dst[] = {
+		&lower_pcnt_force[0], &lower_pcnt_force[1], &lower_pcnt_force[2],
+		&upper_pcnt_force[0], &upper_pcnt_force[1], &upper_pcnt_force[2],
+	};
+	char buf[64];
+	int len = count;
+	long arg0 = 0;
+	char *token, *cur = buf;
+	int i;
+
+	if (len >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	/* invalidate the override until all six values have been parsed */
+	upper_pcnt_force[2] = 0;
+
+	/* expect "lo0 lo1 lo2 up0 up1 up2", space separated */
+	for (i = 0; i < ARRAY_SIZE(dst); i++) {
+		token = strsep(&cur, " ");
+		if (!token || kstrtol(token, 10, &arg0))
+			break;
+		*dst[i] = arg0;
+	}
+
+	return len;
+}
+
+
+static int force_opp_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, force_opp_read, NULL);
+}
+
+static int force_tput_threshold_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, force_tput_threshold_read, NULL);
+}
+
+static int force_pcnt_threshold_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, force_pcnt_threshold_read, NULL);
+}
+
+static int dvfs_timeout_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dvfs_timeout_read, NULL);
+}
+
+static int dvfs_debug_level_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dvfs_debug_level_read, NULL);
+}
+
+ssize_t force_opp_write(struct file *file, const char __user *buffer, size_t count, loff_t *data)
+{
+	char buf[32];
+	int len = count;
+	long arg0 = 0;
+	int ret;
+
+	if (len >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	ret = kstrtol(buf, 10, &arg0);
+	if (ret)
+		return ret;
+
+	force_opp = arg0;
+
+	return len;
+}
+
+static const struct file_operations dvfs_en_fops = {
+	.owner = THIS_MODULE,
+	.open = dvfs_en_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = dvfs_en_write,
+	.release = single_release
+};
+
+static const struct file_operations force_opp_fops = {
+	.owner = THIS_MODULE,
+	.open = force_opp_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = force_opp_write,
+	.release = single_release
+};
+
+static const struct file_operations force_tput_threshold_fops = {
+	.owner = THIS_MODULE,
+	.open = force_tput_threshold_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = force_tput_threshold_write,
+	.release = single_release
+};
+
+static const struct file_operations force_pcnt_threshold_fops = {
+	.owner = THIS_MODULE,
+	.open = force_pcnt_threshold_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = force_pcnt_threshold_write,
+	.release = single_release
+};
+
+static const struct file_operations dvfs_timeout_fops = {
+	.owner = THIS_MODULE,
+	.open = dvfs_timeout_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = dvfs_timeout_write,
+	.release = single_release
+};
+
+static const struct file_operations dvfs_debug_level_fops = {
+	.owner = THIS_MODULE,
+	.open = dvfs_debug_level_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = dvfs_debug_level_write,
+	.release = single_release
+};
+
+static void create_procfs(void)
+{
+	proc_dvfs_en = proc_create(PROCREG_DVFS_EN, 0,
+				   hnat_proc_reg_dir, &dvfs_en_fops);
+	if (!proc_dvfs_en)
+		pr_err("!! FAIL to create %s PROC !!\n", PROCREG_DVFS_EN);
+
+	proc_force_opp = proc_create(PROCREG_FORCE_OPP, 0,
+				     hnat_proc_reg_dir, &force_opp_fops);
+	if (!proc_force_opp)
+		pr_err("!! FAIL to create %s PROC !!\n", PROCREG_FORCE_OPP);
+
+	proc_force_tput_threshold = proc_create(PROCREG_FORCE_TPUT_THRESHOLD, 0,
+						hnat_proc_reg_dir, &force_tput_threshold_fops);
+	if (!proc_force_tput_threshold)
+		pr_err("!! FAIL to create %s PROC !!\n", PROCREG_FORCE_TPUT_THRESHOLD);
+
+	proc_force_pcnt_threshold = proc_create(PROCREG_FORCE_PCNT_THRESHOLD, 0,
+						hnat_proc_reg_dir, &force_pcnt_threshold_fops);
+	if (!proc_force_pcnt_threshold)
+		pr_err("!! FAIL to create %s PROC !!\n", PROCREG_FORCE_PCNT_THRESHOLD);
+
+	proc_dvfs_timeout = proc_create(PROCREG_DVFS_TIMEOUT, 0,
+					hnat_proc_reg_dir, &dvfs_timeout_fops);
+	if (!proc_dvfs_timeout)
+		pr_err("!! FAIL to create %s PROC !!\n", PROCREG_DVFS_TIMEOUT);
+
+	proc_dvfs_debug_level = proc_create(PROCREG_DVFS_DEBUG_LEVEL, 0,
+					    hnat_proc_reg_dir, &dvfs_debug_level_fops);
+	if (!proc_dvfs_debug_level)
+		pr_err("!! FAIL to create %s PROC !!\n", PROCREG_DVFS_DEBUG_LEVEL);
+}
+
+static void fini_procfs(void)
+{
+	if (proc_dvfs_en)
+		remove_proc_entry(PROCREG_DVFS_EN, hnat_proc_reg_dir);
+	if (proc_force_opp)
+		remove_proc_entry(PROCREG_FORCE_OPP, hnat_proc_reg_dir);
+	if (proc_force_tput_threshold)
+		remove_proc_entry(PROCREG_FORCE_TPUT_THRESHOLD, hnat_proc_reg_dir);
+	if (proc_force_pcnt_threshold)
+		remove_proc_entry(PROCREG_FORCE_PCNT_THRESHOLD, hnat_proc_reg_dir);
+	if (proc_dvfs_timeout)
+		remove_proc_entry(PROCREG_DVFS_TIMEOUT, hnat_proc_reg_dir);
+	if (proc_dvfs_debug_level)
+		remove_proc_entry(PROCREG_DVFS_DEBUG_LEVEL, hnat_proc_reg_dir);
+}
+
+/* init/fini */
+void sw_dvfs_init(void)
+{
+	create_procfs();
+
+	/* set up the dvfs timer before anything can arm it */
+	timer_setup(&dvfs_timer, dvfs_do, 0);
+
+	netsys_base = ioremap(MTK_ETHDMA_BASE, 0x1000);
+	if (!netsys_base) {
+		pr_err("netsys swdvfs: ioremap failed\n");
+		return;
+	}
+
+	backto_default_opp();
+}
+
+void sw_dvfs_fini(void)
+{
+	/* remove timer */
+	del_timer_sync(&dvfs_timer);
+
+	fini_procfs();
+
+	/* back to default opp, if the register window was mapped */
+	if (netsys_base) {
+		backto_default_opp();
+		iounmap(netsys_base);
+	}
+}
+
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_config.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_config.h
new file mode 100644
index 0000000..5f72f3e
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_config.h
@@ -0,0 +1,257 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _HNAT_CONFIG_WANTED
+#define _HNAT_CONFIG_WANTED
+
+#include "raeth_config.h"
+
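+/*
+ * Each kernel config option below collapses to a fixed BIT(n) when enabled
+ * and to (0) when disabled, so the defines can be OR-ed into run-time
+ * feature words (presumably the hnat_chip_name / fe_feature variables
+ * declared elsewhere) and tested without further #ifdefs.
+ */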
+#if defined(CONFIG_ARCH_MT7622)
+#define USE_UDP_FRAG
+#endif
+
+#ifdef CONFIG_RALINK_MT7620
+#define MT7620_HWNAT	BIT(0)
+#else
+#define MT7620_HWNAT	(0)
+#endif
+
+#ifdef CONFIG_RALINK_MT7621
+#define MT7621_HWNAT	BIT(1)
+#else
+#define MT7621_HWNAT	(0)
+#endif
+#ifdef CONFIG_ARCH_MT7622
+#define MT7622_HWNAT	BIT(2)
+#else
+#define MT7622_HWNAT	(0)
+#endif
+#ifdef CONFIG_ARCH_MT7623
+#define MT7623_HWNAT	BIT(3)
+#else
+#define MT7623_HWNAT	(0)
+#endif
+#ifdef CONFIG_MACH_LEOPARD
+#define LEOPARD_HWNAT	BIT(4)
+#else
+#define LEOPARD_HWNAT	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_GMAC2
+#define GE2_SUPPORT	BIT(0)
+#else
+#define GE2_SUPPORT	(0)
+#endif
+
+#ifdef	CONFIG_RA_HW_NAT_IPV6
+#define HNAT_IPV6	BIT(1)
+#else
+#define HNAT_IPV6	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+#define HNAT_VLAN_TX	BIT(2)
+#else
+#define HNAT_VLAN_TX	(0)
+#endif
+
+#ifdef	CONFIG_PPE_MCAST
+#define HNAT_MCAST	BIT(3)
+#else
+#define HNAT_MCAST	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_QDMA
+#define HNAT_QDMA	BIT(4)
+#else
+#define HNAT_QDMA	(0)
+#endif
+
+#ifdef	CONFIG_ARCH_MT7622_WIFI_HW_NAT
+#define WARP_WHNAT	BIT(5)
+#else
+#define WARP_WHNAT	(0)
+#endif
+
+#if defined(CONFIG_RA_HW_NAT_WIFI) || defined(CONFIG_RA_HW_NAT_WIFI_NEW_ARCH)
+#define WIFI_HNAT	BIT(6)
+#else
+#define WIFI_HNAT	(0)
+#endif
+
+#ifdef	CONFIG_WAN_AT_P4
+#define HNAT_WAN_P4	BIT(7)
+#else
+#define HNAT_WAN_P4	(0)
+#endif
+
+#ifdef	CONFIG_WAN_TO_WLAN_SUPPORT_QOS
+#define WAN_TO_WLAN_QOS	BIT(8)
+#else
+#define WAN_TO_WLAN_QOS		(0)
+#endif
+
+#ifdef	CONFIG_RAETH_SPECIAL_TAG
+#define HNAT_SP_TAG	BIT(9)
+#else
+#define HNAT_SP_TAG		(0)
+#endif
+
+#ifdef CONFIG_RAETH_QDMATX_QDMARX
+#define QDMA_TX_RX	BIT(10)
+#else
+#define QDMA_TX_RX		(0)
+#endif
+
+#ifdef CONFIG_PPE_MIB
+#define PPE_MIB	BIT(11)
+#else
+#define PPE_MIB		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_PACKET_SAMPLING
+#define PACKET_SAMPLING	BIT(12)
+#else
+#define PACKET_SAMPLING		(0)
+#endif
+
+#ifdef CONFIG_SUPPORT_OPENWRT
+#define HNAT_OPENWRT	BIT(13)
+#else
+#define HNAT_OPENWRT		(0)
+#endif
+
+#ifdef CONFIG_SUPPORT_WLAN_QOS
+#define HNAT_WLAN_QOS	BIT(14)
+#else
+#define HNAT_WLAN_QOS		(0)
+#endif
+
+#ifdef CONFIG_SUPPORT_WLAN_OPTIMIZE
+#define WLAN_OPTIMIZE	BIT(15)
+#else
+#define WLAN_OPTIMIZE		(0)
+#endif
+
+#ifdef USE_UDP_FRAG
+#define UDP_FRAG	BIT(16)
+#else
+#define UDP_FRAG		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_AUTO_MODE
+#define AUTO_MODE	BIT(17)
+#else
+#define AUTO_MODE		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_SEMI_AUTO_MODE
+#define SEMI_AUTO_MODE	BIT(18)
+#else
+#define SEMI_AUTO_MODE		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_MANUAL_MODE
+#define MANUAL_MODE	BIT(19)
+#else
+#define MANUAL_MODE		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_PREBIND
+#define PRE_BIND	BIT(20)
+#else
+#define PRE_BIND		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_ACCNT_MAINTAINER
+#define ACCNT_MAINTAINER	BIT(21)
+#else
+#define ACCNT_MAINTAINER		(0)
+#endif
+
+#ifdef CONFIG_HW_NAT_IPI
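+/* note: HNAT_IPI shares BIT(21) with ACCNT_MAINTAINER above */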
+#define HNAT_IPI	BIT(21)
+#else
+#define HNAT_IPI		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_HASH_DBG_IPV6_SIP
+#define DBG_IPV6_SIP	BIT(22)
+#else
+#define DBG_IPV6_SIP		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_HASH_DBG_IPV4_SIP
+#define DBG_IPV4_SIP	BIT(23)
+#else
+#define DBG_IPV4_SIP		(0)
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_HASH_DBG_SPORT
+#define DBG_SP	BIT(24)
+#else
+#define DBG_SP		(0)
+#endif
+
+#ifdef CONFIG_QDMA_SUPPORT_QOS
+#define ETH_QOS	BIT(25)
+#else
+#define ETH_QOS		(0)
+#endif
+
+#endif
+
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_define.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_define.h
new file mode 100644
index 0000000..d407dc5
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_define.h
@@ -0,0 +1,107 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef _FOE_DEFINE_WANTED
+#define _FOE_DEFINE_WANTED
+
+#include "frame_engine.h"
+extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
+extern void (*ppe_dev_register_hook)(struct net_device *dev);
+extern void (*ppe_dev_unregister_hook)(struct net_device *dev);
+extern u8 bind_dir;
+extern u16 wan_vid;
+extern u16 lan_vid;
+extern struct foe_entry *ppe_virt_foe_base_tmp;
+#if defined(CONFIG_RAETH_QDMA)
+extern unsigned int M2Q_table[64];
+extern unsigned int lan_wan_separate;
+#endif
+extern struct hwnat_ac_args ac_info[64];
+extern u32 debug_level;
+extern struct net_device *dst_port[MAX_IF_NUM];
+
+extern struct ps_entry *ppe_ps_base;
+/*extern struct pkt_parse_result ppe_parse_result;*/
+extern int dbg_cpu_reason;
+#ifdef CONFIG_SUPPORT_OPENWRT
+#define DEV_NAME_HNAT_LAN	"eth0"
+#define DEV_NAME_HNAT_WAN	"eth1"
+#else
+#define DEV_NAME_HNAT_LAN	"eth2"
+#define DEV_NAME_HNAT_WAN	"eth3"
+#endif
+
+#ifdef CONFIG_RA_HW_NAT_PACKET_SAMPLING
+static inline void hwnat_set_packet_sampling(struct foe_entry *entry)
+{
+	entry->ipv4_hnapt.bfib1.ps = 1;
+}
+#else
+static inline void hwnat_set_packet_sampling(struct foe_entry *entry)
+{
+}
+#endif
+
+#if !defined(CONFIG_RALINK_MT7621)
+static inline void hwnat_set_6rd_id(struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+	reg_modify_bits(PPE_HASH_SEED, ntohs(ppe_parse_result->iph.id), 0, 16);
+	entry->ipv6_6rd.per_flow_6rd_id = 1;
+}
+#else
+static inline void hwnat_set_6rd_id(struct foe_entry *entry, struct pkt_parse_result *ppe_parse_result)
+{
+}
+#endif
+
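+/*
+ * Usage sketch (assumption, for illustration only): the ethernet driver is
+ * expected to publish packets through the hook pointers declared above,
+ * roughly as follows; the real wiring lives outside this file.
+ *
+ *	// GMAC rx path
+ *	if (ra_sw_nat_hook_rx)
+ *		ra_sw_nat_hook_rx(skb);
+ *
+ *	// GMAC tx path; gmac_no selects the egress GMAC
+ *	if (ra_sw_nat_hook_tx)
+ *		ra_sw_nat_hook_tx(skb, gmac_no);
+ */
+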
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ioctl.c b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ioctl.c
new file mode 100755
index 0000000..44f99da
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ioctl.c
@@ -0,0 +1,1272 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include "ra_nat.h"
+
+#include "frame_engine.h"
+#include "hnat_ioctl.h"
+#include "foe_fdb.h"
+#include "util.h"
+#include "mcast_tbl.h"
+
+unsigned char bind_dir = BIDIRECTION;
+/*please choose any one of your LAN side VLAN IDs if you use different VLAN ID for each LAN port*/
+unsigned short lan_vid = 1;
+/*please choose any one of your WAN side VLAN IDs if you use different VLAN ID for each WAN port*/
+unsigned short wan_vid = 2;
+int debug_level;
+extern struct foe_entry *ppe1_foe_base;
+extern struct mib_entry *ppe1_mib_base;
+#if defined(CONFIG_HW_NAT_IPI)
+/* IPI state lives in hwnat_ipi.c; NR_CPUS keeps these extern bounds constant */
+extern int hnat_ipi_timer_setup(void);
+extern hnat_ipi_cfg *hnat_ipi_config;
+extern hnat_ipi_s *hnat_ipi_from_extif[NR_CPUS];
+extern hnat_ipi_s *hnat_ipi_from_ppehit[NR_CPUS];
+extern hnat_ipi_stat *hnat_ipi_status[NR_CPUS];
+extern unsigned int ipidbg[NR_CPUS][10];
+extern unsigned int ipidbg2[NR_CPUS][10];
+#endif
+
+long hw_nat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct hwnat_args *opt = (struct hwnat_args *)arg;
+	struct hwnat_tuple *opt2 = (struct hwnat_tuple *)arg;
+	struct hwnat_tuple *opt2_k;
+	struct hwnat_ac_args *opt3 = (struct hwnat_ac_args *)arg;
+	struct hwnat_ac_args *opt3_k;
+	struct hwnat_config_args *opt4 = (struct hwnat_config_args *)arg;
+	struct hwnat_config_args *opt4_k;
+	struct hwnat_mcast_args *opt5 = (struct hwnat_mcast_args *)arg;
+	struct hwnat_mcast_args *opt5_k;
+	struct foe_entry *entry = NULL;
+	struct foe_entry *entry1 = NULL;
+
+	struct hwnat_mib_args *opt6 = (struct hwnat_mib_args *)arg;
+	struct hwnat_mib_args *opt6_k;
+	struct hwnat_mib_all_ip_args *opt7 = (struct hwnat_mib_all_ip_args *)arg;
+	struct hwnat_mib_all_ip_args *opt7_k;
+	unsigned long tx_pkt_cnt = 0;
+	unsigned long tx_byte_cnt = 0;
+	unsigned long rx_pkt_cnt = 0;
+	unsigned long rx_byte_cnt = 0;
+
+	struct hwnat_args *opt1;
+#if defined(CONFIG_HW_NAT_IPI)
+	struct hwnat_ipi_args *opt8 = (struct hwnat_ipi_args *)arg;
+	struct hwnat_ipi_args *opt8_k;
+	int i, j;
+#endif
+	int size;
+	size = sizeof(struct hwnat_args) + sizeof(struct hwnat_tuple) * 1024 * 16;
+	switch (cmd) {
+	case HW_NAT_ADD_ENTRY:
+		opt2_k = vmalloc(sizeof(*opt2_k));
+		if (opt2_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt2_k, opt2, sizeof(*opt2_k))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt2_k);
+			break;
+		}
+		opt2_k->result = foe_add_entry_dvt(opt2_k);
+		vfree(opt2_k);
+		break;
+	case HW_NAT_DEL_ENTRY:
+		pr_notice("HW_NAT_DEL_ENTRY\n");
+		opt2_k = vmalloc(sizeof(*opt2_k));
+		if (opt2_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt2_k, opt2, sizeof(*opt2_k))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt2_k);
+			break;
+		}
+		opt2_k->result = foe_del_entry(opt2_k);
+		vfree(opt2_k);
+		break;
+	case HW_NAT_GET_ALL_ENTRIES:
+
+		opt1 = vmalloc(size);
+		if (opt1 == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt1, opt, size))
+			pr_notice("copy_from_user fail\n");
+		opt1->result = foe_get_all_entries(opt1);
+		if (copy_to_user(opt, opt1, size))
+			pr_notice("copy_to_user fail\n");
+
+		vfree(opt1);
+		break;
+	case HW_NAT_BIND_ENTRY:
+		opt1 = vmalloc(sizeof(*opt1));
+		if (opt1 == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt1, opt, sizeof(struct hwnat_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt1);
+			break;
+		}
+		opt1->result = foe_bind_entry(opt1);
+		vfree(opt1);
+		break;
+	case HW_NAT_UNBIND_ENTRY:
+		opt1 = vmalloc(sizeof(*opt1));
+		if (opt1 == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt1, opt, sizeof(struct hwnat_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt1);
+			break;
+		}
+		opt1->result = foe_un_bind_entry(opt1);
+		vfree(opt1);
+		break;
+	case HW_NAT_DROP_ENTRY:
+		opt1 = vmalloc(sizeof(*opt1));
+		if (opt1 == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt1, opt, sizeof(struct hwnat_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt1);
+			break;
+		}
+		opt1->result = foe_drop_entry(opt1);
+		vfree(opt1);
+		break;
+	case HW_NAT_INVALID_ENTRY:
+		opt1 = vmalloc(sizeof(*opt1));
+		if (opt1 == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt1, opt, sizeof(struct hwnat_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt1);
+			break;
+		}
+		if ((opt1->entry_num) >= FOE_4TB_SIZ) {
+			pr_debug("entry_num too large\n");
+			vfree(opt1);
+			break;
+		}
+		opt1->result = foe_del_entry_by_num(opt1->entry_num);
+		vfree(opt1);
+		break;
+	case HW_NAT_DUMP_ENTRY:
+		opt1 = vmalloc(size);
+		if (opt1 == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt1, opt, sizeof(struct hwnat_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt1);
+			break;
+		}
+		if ((opt1->entry_num) >= FOE_4TB_SIZ) {
+			pr_debug("entry_num too large\n");
+			vfree(opt1);
+			break;
+		}
+		entry = &ppe_foe_base[(u32)opt1->entry_num];
+		foe_dump_entry((u32)opt1->entry_num, entry);
+
+		entry1 = &ppe1_foe_base[(u32)opt1->entry_num];
+		foe_dump_entry((u32)opt1->entry_num, entry1);
+		vfree(opt1);
+		break;
+	case HW_NAT_DUMP_CACHE_ENTRY:
+		foe_dump_cache_entry();
+		break;
+	case HW_NAT_DEBUG:	/* For Debug */
+		opt1 = vmalloc(size);
+		if (opt1 == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt1, opt, sizeof(struct hwnat_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt1);
+			break;
+		}
+		debug_level = opt1->debug;
+		vfree(opt1);
+		break;
+	case HW_NAT_GET_AC_CNT:
+		opt3_k = vmalloc(sizeof(*opt3_k));
+		if (opt3_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt3_k, opt3, sizeof(*opt3_k))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt3_k);
+			break;
+		}
+		opt3_k->result = ppe_get_agcnt(opt3_k);
+		if (copy_to_user(opt3, opt3_k, sizeof(*opt3_k)))
+			pr_notice("copy_to_user fail\n");
+		vfree(opt3_k);
+		break;
+	case HW_NAT_BIND_THRESHOLD:
+		opt4_k = vmalloc(sizeof(*opt4_k));
+		if (opt4_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt4_k, opt4, sizeof(struct hwnat_config_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt4_k);
+			break;
+		}
+		opt4_k->result = ppe_set_bind_threshold(opt4_k->bind_threshold);
+		vfree(opt4_k);
+		break;
+	case HW_NAT_MAX_ENTRY_LMT:
+		opt4_k = vmalloc(sizeof(*opt4_k));
+		if (opt4_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt4_k, opt4, sizeof(struct hwnat_config_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt4_k);
+			break;
+		}
+		opt4_k->result =
+		    ppe_set_max_entry_limit(opt4_k->foe_full_lmt,
+					    opt4_k->foe_half_lmt, opt4_k->foe_qut_lmt);
+		vfree(opt4_k);
+		break;
+	case HW_NAT_KA_INTERVAL:
+		opt4_k = vmalloc(sizeof(*opt4_k));
+		if (opt4_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt4_k, opt4, sizeof(struct hwnat_config_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt4_k);
+			break;
+		}
+		opt4_k->result = ppe_set_ka_interval(opt4_k->foe_tcp_ka, opt4_k->foe_udp_ka);
+		vfree(opt4_k);
+		break;
+	case HW_NAT_UB_LIFETIME:
+		opt4_k = vmalloc(sizeof(*opt4_k));
+		if (opt4_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt4_k, opt4, sizeof(struct hwnat_config_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt4_k);
+			break;
+		}
+		opt4_k->result = ppe_set_unbind_lifetime(opt4_k->foe_unb_dlta);
+		vfree(opt4_k);
+		break;
+	case HW_NAT_BIND_LIFETIME:
+		opt4_k = vmalloc(sizeof(*opt4_k));
+		if (opt4_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt4_k, opt4, sizeof(struct hwnat_config_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt4_k);
+			break;
+		}
+		opt4_k->result =
+		    ppe_set_bind_lifetime(opt4_k->foe_tcp_dlta,
+					  opt4_k->foe_udp_dlta, opt4_k->foe_fin_dlta);
+		vfree(opt4_k);
+		break;
+	case HW_NAT_BIND_DIRECTION:
+		opt4_k = vmalloc(sizeof(*opt4_k));
+		if (opt4_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt4_k, opt4, sizeof(struct hwnat_config_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt4_k);
+			break;
+		}
+		bind_dir = opt4_k->bind_dir;
+		vfree(opt4_k);
+		break;
+	case HW_NAT_VLAN_ID:
+		opt4_k = vmalloc(sizeof(*opt4_k));
+		if (opt4_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt4_k, opt4, sizeof(struct hwnat_config_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt4_k);
+			break;
+		}
+		wan_vid = opt4_k->wan_vid;
+		lan_vid = opt4_k->lan_vid;
+		vfree(opt4_k);
+		break;
+	case HW_NAT_MCAST_INS:
+		opt5_k = vmalloc(sizeof(*opt5_k));
+		if (opt5_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt5_k, opt5, sizeof(struct hwnat_mcast_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt5_k);
+			break;
+		}
+		foe_mcast_entry_ins(opt5_k->mc_vid, opt5_k->dst_mac, opt5_k->mc_px_en,
+				opt5_k->mc_px_qos_en, opt5_k->mc_qos_qid);
+		vfree(opt5_k);
+		break;
+	case HW_NAT_MCAST_DEL:
+		opt5_k = vmalloc(sizeof(*opt5_k));
+		if (opt5_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt5_k, opt5, sizeof(struct hwnat_mcast_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt5_k);
+			break;
+		}
+		foe_mcast_entry_del(opt5_k->mc_vid, opt5_k->dst_mac, opt5_k->mc_px_en,
+				    opt5_k->mc_px_qos_en, opt5_k->mc_qos_qid);
+		vfree(opt5_k);
+		break;
+	case HW_NAT_MCAST_DUMP:
+		foe_mcast_entry_dump();
+		break;
+	case HW_NAT_MIB_DUMP:
+		opt6_k = vmalloc(sizeof(*opt6_k));
+		if (opt6_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt6_k, opt6, sizeof(struct hwnat_mib_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt6_k);
+			break;
+		}
+		if ((opt6_k->entry_num) >= FOE_4TB_SIZ) {
+			pr_debug("entry_num too large\n");
+			vfree(opt6_k);
+			break;
+		}
+		ppe_mib_dump(opt6_k->entry_num, &tx_pkt_cnt, &tx_byte_cnt);
+		vfree(opt6_k);
+		break;
+	case HW_NAT_MIB_DRAM_DUMP:
+		opt6_k = vmalloc(sizeof(*opt6_k));
+		if (opt6_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt6_k, opt6, sizeof(struct hwnat_mib_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt6_k);
+			break;
+		}
+		if ((opt6_k->entry_num) >= FOE_4TB_SIZ) {
+			pr_debug("entry_num too large\n");
+			vfree(opt6_k);
+			break;
+		}
+		ppe_mib_dram_dump(opt6_k->entry_num);
+		vfree(opt6_k);
+		break;
+	case HW_NAT_MIB_GET:
+		opt2_k = vmalloc(sizeof(*opt2_k));
+		if (opt2_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt2_k, opt2, sizeof(*opt2_k))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt2_k);
+			break;
+		}
+		opt2_k->result = get_ppe_mib(opt2_k, &tx_pkt_cnt, &tx_byte_cnt, &rx_pkt_cnt, &rx_byte_cnt);
+		pr_notice("!!!!, tx byte = %lu\n", tx_byte_cnt);
+		pr_notice("!!!!, tx pkt = %lu\n", tx_pkt_cnt);
+		pr_notice("!!!!, rx byte = %lu\n", rx_byte_cnt);
+		pr_notice("!!!!, rx pkt = %lu\n", rx_pkt_cnt);
+		vfree(opt2_k);
+		break;
+	case HW_NAT_MIB_GET_ALL_IP:
+		opt7_k = vmalloc(sizeof(struct hwnat_mib_all_ip_args));
+		if (opt7_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt7_k, opt7, sizeof(struct hwnat_mib_all_ip_args))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt7_k);
+			break;
+		}
+		get_ppe_mib_ip(opt7_k);
+		if (copy_to_user(opt7, opt7_k, sizeof(struct hwnat_mib_all_ip_args))) {
+			pr_debug("copy_to_user fail\n");
+			vfree(opt7_k);
+			break;
+		}
+		vfree(opt7_k);
+		break;
+	case HW_NAT_TBL_CLEAR:
+		ppe_tbl_clear();
+		break;
+	case HW_NAT_IPI_CTRL_FROM_EXTIF:
+#if defined(CONFIG_HW_NAT_IPI)
+		/* Dora */
+		opt8_k = vmalloc(sizeof(*opt8_k));
+		if (opt8_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt8_k, opt8, sizeof(*opt8_k)))
+			pr_notice("copy_from_user fail\n");
+		local_irq_disable();
+		if ((opt8_k->hnat_ipi_enable == 1) && (hnat_ipi_config->enable_from_extif != 1)) {
+			hnat_ipi_config->enable_from_extif = opt8_k->hnat_ipi_enable;
+			hnat_ipi_timer_setup();
+		} else {
+			hnat_ipi_config->enable_from_extif = opt8_k->hnat_ipi_enable;
+		}
+		hnat_ipi_config->queue_thresh_from_extif = opt8_k->queue_thresh;
+		hnat_ipi_config->drop_pkt_from_extif = opt8_k->drop_pkt;
+		hnat_ipi_config->ipi_cnt_mod_from_extif = opt8_k->ipi_cnt_mod;
+		local_irq_enable();
+		pr_notice("*** [FromExtIf]hnat_ipi_enable=%d, queue_thresh=%d, drop_pkt=%d ***\n",
+			hnat_ipi_config->enable_from_extif,
+			hnat_ipi_config->queue_thresh_from_extif,
+			hnat_ipi_config->drop_pkt_from_extif);
+		if (hnat_ipi_config->enable_from_extif == 1) {
+			hnat_ipi_s *phnat_ipi;
+			hnat_ipi_stat *phnat_ipi_status;
+		/* if (1) { */
+			/*extern unsigned int ipidbg[num_possible_cpus()][10];*/
+
+			for (i = 0; i < num_possible_cpus(); i++) {
+				phnat_ipi = hnat_ipi_from_extif[i];
+				phnat_ipi_status = hnat_ipi_status[i];
+#if defined(HNAT_IPI_DQ)
+				pr_notice("skbQueue[%d].qlen=%d,%d, dropPktNum[%d]=%d,\n", i,
+					phnat_ipi->skb_input_queue.qlen, phnat_ipi->skb_process_queue.qlen,
+					i, phnat_ipi_status->drop_pkt_num_from_extif);
+				pr_notice("cpu_status[%d]=%d, smp_call_cnt[%d]=%d\n", i,
+					atomic_read(&phnat_ipi_status->cpu_status_from_extif), i,
+					phnat_ipi_status->smp_call_cnt_from_extif);
+#elif defined(HNAT_IPI_RXQUEUE)
+				pr_notice("rx_queue_num[%d]=%d, dropPktNum[%d]=%d\n", i
+					phnat_ipi->rx_queue_num, i, phnat_ipi_status->drop_pkt_num_from_extif);
+				pr_notice("cpu_status[%d]=%d, smp_call_cnt[%d]=%d\n", i,
+					atomic_read(&phnat_ipi_status->cpu_status_from_extif), i,
+					phnat_ipi_status->smp_call_cnt_from_extif);
+#else
+				pr_notice("skb_ipi_queue[%d].qlen=%d, dropPktNum[%d]=%d\n", i,
+					skb_queue_len(&phnat_ipi->skb_ipi_queue), i,
+								phnat_ipi_status->drop_pkt_num_from_extif);
+				pr_notice("cpu_status[%d]=%d, smp_call_cnt[%d]=%d\n", i,
+					atomic_read(&phnat_ipi_status->cpu_status_from_extif), i,
+					phnat_ipi_status->smp_call_cnt_from_extif);
+#endif
+				phnat_ipi_status->drop_pkt_num_from_extif = 0;
+				phnat_ipi_status->smp_call_cnt_from_extif = 0;
+			}
+			for (i = 0; i < 10; i++) {
+				for (j = 0; j < num_possible_cpus(); j++) {
+					pr_notice("dbg[%d][%d]=%d,", j, i, ipidbg[j][i]);
+					if (j == 3)
+						pr_notice("\n");
+				}
+			}
+			memset(ipidbg, 0, sizeof(ipidbg));
+		}
+		vfree(opt8_k);
+#endif
+
+		break;
+	case HW_NAT_IPI_CTRL_FROM_PPEHIT:
+		/* Dora */
+#if defined(CONFIG_HW_NAT_IPI)
+		opt8_k = vmalloc(sizeof(*opt8_k));
+		if (opt8_k == NULL) {
+			pr_notice("vmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt8_k, opt8, sizeof(*opt8_k)))
+			pr_notice("copy_from_user fail\n");
+		local_irq_disable();
+		pr_notice("*** [FromPPE]hnat_ipi_enable=%d, queue_thresh=%d, drop_pkt=%d ***\n",
+			hnat_ipi_config->enable_from_ppehit,
+			hnat_ipi_config->queue_thresh_from_ppehit,
+			hnat_ipi_config->drop_pkt_from_ppehit);
+		if ((opt8_k->hnat_ipi_enable == 1) && (hnat_ipi_config->enable_from_ppehit != 1)) {
+			hnat_ipi_config->enable_from_ppehit = opt8_k->hnat_ipi_enable;
+			hnat_ipi_timer_setup();
+		} else {
+			hnat_ipi_config->enable_from_ppehit = opt8_k->hnat_ipi_enable;
+		}
+		hnat_ipi_config->queue_thresh_from_ppehit = opt8_k->queue_thresh;
+		hnat_ipi_config->drop_pkt_from_ppehit = opt8_k->drop_pkt;
+		hnat_ipi_config->ipi_cnt_mod_from_ppehit = opt8_k->ipi_cnt_mod;
+		local_irq_enable();
+
+		if (hnat_ipi_config->enable_from_ppehit == 1) {
+			hnat_ipi_s *phnat_ipi;
+			hnat_ipi_stat *phnat_ipi_status;
+		/* if (1) { */
+			/*extern unsigned int ipidbg2[num_possible_cpus()][10];*/
+
+			for (i = 0; i < num_possible_cpus(); i++) {
+				phnat_ipi = hnat_ipi_from_ppehit[i];
+				phnat_ipi_status = hnat_ipi_status[i];
+#if defined(HNAT_IPI_DQ)
+
+				pr_notice("skbQueue[%d].qlen=%d,%d, dropPktNum[%d]=%d\n",
+					i, phnat_ipi->skb_input_queue.qlen,
+					phnat_ipi->skb_process_queue.qlen,
+					i, phnat_ipi_status->drop_pktnum_from_ppehit);
+				pr_notice("cpu_status[%d]=%d, smp_call_cnt[%d]=%d\n", i,
+					atomic_read(&phnat_ipi_status->cpu_status_from_ppehit), i,
+					phnat_ipi_status->smp_call_cnt_from_ppehit);
+#elif defined(HNAT_IPI_RXQUEUE)
+				pr_notice("rx_queue_num[%d]=%d, dropPktNum[%d]=%d\n", i,
+					phnat_ipi->rx_queue_num, i, phnat_ipi_status->drop_pktnum_from_ppehit);
+				pr_notice("cpu_status[%d]=%d, smp_call_cnt[%d]=%d\n", i,
+					atomic_read(&phnat_ipi_status->cpu_status_from_ppehit), i,
+					phnat_ipi_status->smp_call_cnt_from_ppehit);
+#else
+				pr_notice("skb_ipi_queue[%d].qlen=%d, dropPktNum[%d]=%d\n", i,
+					skb_queue_len(&phnat_ipi->skb_ipi_queue), i,
+						phnat_ipi_status->drop_pktnum_from_ppehit);
+				pr_notice("cpu_status[%d]=%d, smp_call_cnt[%d]=%d\n", i,
+					atomic_read(&phnat_ipi_status->cpu_status_from_ppehit), i,
+						phnat_ipi_status->smp_call_cnt_from_ppehit))
+#endif
+				phnat_ipi_status->drop_pktnum_from_ppehit = 0;
+				phnat_ipi_status->smp_call_cnt_from_ppehit = 0;
+			}
+			for (i = 0; i < 10; i++) {
+				for (j = 0; j < num_possible_cpus(); j++) {
+					pr_notice("dbg2[%d][%d]=%d,", j, i, ipidbg2[j][i]);
+					if (j == 3)
+						pr_notice("\n");
+				}
+			}
+			memset(ipidbg2, 0, sizeof(ipidbg2));
+		}
+		vfree(opt8_k);
+#endif
+
+		break;
+	case HW_NAT_DPORT:
+		dump_dport();
+		break;
+
+	case HW_NAT_CLEAR_HOOK:
+	case HW_NAT_RESTORE_HOOK:
+
+		opt2_k = vmalloc(sizeof(*opt2_k));
+		if (opt2_k == NULL) {
+			pr_notice("kmalloc fail\n");
+			break;
+		}
+		if (copy_from_user(opt2_k, opt2, sizeof(*opt2_k))) {
+			pr_debug("copy_from_user fail\n");
+			vfree(opt2_k);
+			break;
+		}
+		ppe_modify_hook(
+			(cmd == HW_NAT_CLEAR_HOOK) ? true : false, opt2_k->dst_port, opt2_k->etype);
+
+		vfree(opt2_k);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+const struct file_operations hw_nat_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl = hw_nat_ioctl,
+	.llseek		= no_llseek,
+};
+
+struct cdev hnat_cdev;
+struct class *hnat_class;
+
+int ppe_reg_ioctl_handler(void)
+{
+	dev_t dev = MKDEV(HW_NAT_MAJOR, 0);
+	int error;
+
+	error = register_chrdev_region(dev, 1, HW_NAT_DEVNAME);
+
+	if (error < 0)
+		pr_notice("register error!!!!\n");
+
+	cdev_init(&hnat_cdev, &hw_nat_fops);
+	error = cdev_add(&hnat_cdev, dev, 1);
+	if (error)
+		pr_notice("cdev_add error !!!!\n");
+
+	hnat_class = class_create(THIS_MODULE, "hnat");
+	if (IS_ERR(hnat_class))
+		pr_notice("Error creating hnat class.\n");
+
+	device_create(hnat_class, NULL, MKDEV(HW_NAT_MAJOR, 0), NULL, HW_NAT_DEVNAME);
+
+	return 0;
+}
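+
+/*
+ * User-space sketch (assumption, not shipped with this driver): the node
+ * registered above is /dev/hwnat0 with major HW_NAT_MAJOR, and the raw
+ * command codes from hwnat_ioctl.h feed hw_nat_ioctl() directly, e.g.:
+ *
+ *	int fd = open("/dev/hwnat0", O_RDONLY);
+ *	struct hwnat_args args = { .entry_num = 0 };
+ *
+ *	if (fd >= 0) {
+ *		ioctl(fd, HW_NAT_DUMP_ENTRY, &args);	// dump FOE entry 0
+ *		close(fd);
+ *	}
+ */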
+
+void ppe_unreg_ioctl_handler(void)
+{
+	dev_t dev = MKDEV(HW_NAT_MAJOR, 0);
+
+	pr_notice("major = %u, minor=%u\n", MAJOR(dev), MINOR(dev));
+	device_destroy(hnat_class, MKDEV(HW_NAT_MAJOR, 0));
+	class_destroy(hnat_class);
+
+	cdev_del(&hnat_cdev);
+	unregister_chrdev_region(MAJOR(dev), 1);
+}
+
+int reply_entry_idx(struct hwnat_tuple *opt, unsigned int entry_num)
+{
+	struct foe_entry *entry = &ppe_foe_base[entry_num];
+	struct foe_pri_key key;
+	s32 hash_index;
+
+	if (opt->pkt_type == IPV4_NAPT) {
+		key.ipv4_hnapt.sip = entry->ipv4_hnapt.new_dip;
+		key.ipv4_hnapt.dip = entry->ipv4_hnapt.new_sip;
+		key.ipv4_hnapt.sport = entry->ipv4_hnapt.new_dport;
+		key.ipv4_hnapt.dport = entry->ipv4_hnapt.new_sport;
+		key.ipv4_hnapt.is_udp = opt->is_udp;
+	}
+	if (opt->pkt_type == IPV6_ROUTING) {
+		if (fe_feature & HNAT_IPV6) {
+			key.ipv6_routing.sip0 = entry->ipv6_5t_route.ipv6_dip0;
+			key.ipv6_routing.sip1 = entry->ipv6_5t_route.ipv6_dip1;
+			key.ipv6_routing.sip2 = entry->ipv6_5t_route.ipv6_dip2;
+			key.ipv6_routing.sip3 = entry->ipv6_5t_route.ipv6_dip3;
+			key.ipv6_routing.dip0 = entry->ipv6_5t_route.ipv6_sip0;
+			key.ipv6_routing.dip1 = entry->ipv6_5t_route.ipv6_sip1;
+			key.ipv6_routing.dip2 = entry->ipv6_5t_route.ipv6_sip2;
+			key.ipv6_routing.dip3 = entry->ipv6_5t_route.ipv6_sip3;
+			key.ipv6_routing.sport = entry->ipv6_5t_route.dport;
+			key.ipv6_routing.dport = entry->ipv6_5t_route.sport;
+			key.ipv6_routing.is_udp = opt->is_udp;
+		}
+	}
+	entry = NULL;
+	key.pkt_type = opt->pkt_type;
+	hash_index = get_mib_entry_idx(&key, entry);
+	if (debug_level >= 1)
+		pr_notice("reply entry idx = %d\n", hash_index);
+
+	return hash_index;
+}
+
+void ppe_mib_dram_dump(uint32_t entry_num)
+{
+	struct mib_entry *mib_entry = &ppe_mib_base[entry_num];
+	struct mib_entry *mib_entry1 = &ppe1_mib_base[entry_num];
+
+	pr_notice("***********DRAM PPE0 Entry = %d*********\n", entry_num);
+	pr_notice("PpeMibBase = %p\n", ppe_mib_base);
+	pr_notice("DRAM Packet_CNT H = %u\n", mib_entry->pkt_cnt_h);
+	pr_notice("DRAM Packet_CNT L = %u\n", mib_entry->pkt_cnt_l);
+	pr_notice("DRAM Byte_CNT H = %u\n", mib_entry->byt_cnt_h);
+	pr_notice("DRAM Byte_CNT L = %u\n", mib_entry->byt_cnt_l);
+
+	pr_notice("***********DRAM PPE1 Entry = %d*********\n", entry_num);
+	pr_notice("Ppe1MibBase = %p\n", ppe1_mib_base);
+	pr_notice("DRAM Packet_CNT H = %u\n", mib_entry1->pkt_cnt_h);
+	pr_notice("DRAM Packet_CNT L = %u\n", mib_entry1->pkt_cnt_l);
+	pr_notice("DRAM Byte_CNT H = %u\n", mib_entry1->byt_cnt_h);
+	pr_notice("DRAM Byte_CNT L = %u\n", mib_entry1->byt_cnt_l);
+}
+
+void ppe_mib_dump(unsigned int entry_num, unsigned long *pkt_cnt, unsigned long *byte_cnt)
+{
+	unsigned int byt_l = 0;
+	unsigned long long byt_h = 0;
+	unsigned int pkt_l = 0;
+	unsigned long pkt_h = 0;
+
+	reg_write(MIB_SER_CR, entry_num | (1 << 16));
+	while (reg_read(MIB_SER_CR) & (1 << 16))
+		;
+	/*make sure write dram correct*/
+	wmb();
+	byt_l = reg_read(MIB_SER_R0);	/* byte cnt bit31~ bit0 */
+	byt_h = reg_read(MIB_SER_R1) & 0xffff;	/* byte cnt bit47 ~ bit32 */
+	pkt_l = (reg_read(MIB_SER_R1) & 0xffff0000) >> 16;
+	pkt_h = reg_read(MIB_SER_R2) & 0xffffff;	/* packet cnt bit39 ~ bit16 */
+	if (debug_level >= 1) {
+		pr_notice("************PPE0 Entry = %d ************\n", entry_num);
+		pr_notice("Packet Cnt = %lu\n", (pkt_h << 16) + pkt_l);
+		pr_notice("Byte Cnt = %llu\n", (byt_h << 32) + byt_l);
+	}
+	*pkt_cnt = (pkt_h << 16) + pkt_l;
+	*byte_cnt = (byt_h << 32) + byt_l;
+
+	pr_notice("**************************************************************\n");
+
+	reg_write(MIB_SER_CR_PPE1, entry_num | (1 << 16));
+	while (reg_read(MIB_SER_CR_PPE1) & (1 << 16))
+		;
+	/*make sure write dram correct*/
+	wmb();
+	byt_l = reg_read(MIB_SER_R0_PPE1);	/* byte cnt bit31~ bit0 */
+	byt_h = reg_read(MIB_SER_R1_PPE1) & 0xffff;	/* byte cnt bit47 ~ bit32 */
+	pkt_l = (reg_read(MIB_SER_R1_PPE1) & 0xffff0000) >> 16;
+	pkt_h = reg_read(MIB_SER_R2_PPE1) & 0xffffff;	/* packet cnt bit39 ~ bit16 */
+	if (debug_level >= 1) {
+		pr_notice("************PPE1 Entry = %d ************\n", entry_num);
+		pr_notice("Packet Cnt = %lu\n", (pkt_h << 16) + pkt_l);
+		pr_notice("Byte Cnt = %llu\n", (byt_h << 32) + byt_l);
+	}
+}
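+
+/*
+ * Worked example of the register packing read back above: MIB_SER_R0 holds
+ * byte-count bits 31..0, MIB_SER_R1 holds byte-count bits 47..32 in its low
+ * half and packet-count bits 15..0 in its high half, and MIB_SER_R2 holds
+ * packet-count bits 39..16. So R0 = 0xd0000000, R1 = 0x00050002 and
+ * R2 = 0x000003 decode to (0x2ULL << 32) + 0xd0000000 = 12079595520 bytes
+ * in (0x3 << 16) + 0x5 = 196613 packets.
+ */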
+
+int get_ppe_mib(struct hwnat_tuple *opt, unsigned long *tx_pkt_cnt, unsigned long *tx_byte_cnt,
+		unsigned long *rx_pkt_cnt, unsigned long *rx_byte_cnt)
+{
+	struct foe_pri_key key;
+	struct foe_entry *entry = NULL;
+	s32 hash_index;
+	s32 rply_idx;
+	/*pr_notice("sip = %x, dip=%x, sp=%d, dp=%d\n", opt->ing_sipv4, opt->ing_dipv4, opt->ing_sp, opt->ing_dp);*/
+	if ((opt->pkt_type) == IPV4_NAPT) {
+		key.ipv4_hnapt.sip = opt->ing_sipv4;
+		key.ipv4_hnapt.dip = opt->ing_dipv4;
+		key.ipv4_hnapt.sport = opt->ing_sp;
+		key.ipv4_hnapt.dport = opt->ing_dp;
+		key.ipv4_hnapt.is_udp = opt->is_udp;
+	} else if ((opt->pkt_type) == IPV6_ROUTING) {
+		key.ipv6_routing.sip0 = opt->ing_sipv6_0;
+		key.ipv6_routing.sip1 = opt->ing_sipv6_1;
+		key.ipv6_routing.sip2 = opt->ing_sipv6_2;
+		key.ipv6_routing.sip3 = opt->ing_sipv6_3;
+		key.ipv6_routing.dip0 = opt->ing_dipv6_0;
+		key.ipv6_routing.dip1 = opt->ing_dipv6_1;
+		key.ipv6_routing.dip2 = opt->ing_dipv6_2;
+		key.ipv6_routing.dip3 = opt->ing_dipv6_3;
+		key.ipv6_routing.sport = opt->ing_sp;
+		key.ipv6_routing.dport = opt->ing_dp;
+		key.ipv6_routing.is_udp = opt->is_udp;
+	}
+
+	key.pkt_type = opt->pkt_type;
+	hash_index = get_mib_entry_idx(&key, entry);
+
+	if (hash_index != -1) {
+		ppe_mib_dump(hash_index, tx_pkt_cnt, tx_byte_cnt);
+		rply_idx = reply_entry_idx(opt, hash_index);
+		if (rply_idx != -1) {
+			ppe_mib_dump(rply_idx, rx_pkt_cnt, rx_byte_cnt);
+		} else {
+			*rx_pkt_cnt = 0;
+			*rx_byte_cnt = 0;
+		}
+		return HWNAT_SUCCESS;
+	}
+
+	return HWNAT_FAIL;
+}
+EXPORT_SYMBOL(get_ppe_mib);
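+
+/*
+ * In-kernel usage sketch (assumption): another module can read per-flow
+ * counters by filling the ingress 5-tuple of a hwnat_tuple and calling the
+ * export above, e.g. for a TCP IPv4 NAPT flow (sip/dip/sport/dport are
+ * placeholders):
+ *
+ *	struct hwnat_tuple t = {
+ *		.pkt_type = IPV4_NAPT,
+ *		.is_udp = 0,
+ *		.ing_sipv4 = sip, .ing_dipv4 = dip,
+ *		.ing_sp = sport, .ing_dp = dport,
+ *	};
+ *	unsigned long txp, txb, rxp, rxb;
+ *
+ *	if (get_ppe_mib(&t, &txp, &txb, &rxp, &rxb) == HWNAT_SUCCESS)
+ *		pr_info("flow: tx %lu pkts, rx %lu pkts\n", txp, rxp);
+ */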
+
+typedef enum hwnat_entry_type {
+	LAN_TO_LAN = 0,
+	LAN_TO_WAN = 1,
+	WAN_TO_LAN = 2,
+} HWNAT_ENTRY_TYPE;
+
+static int update_mib_cnt_ip(struct hwnat_mib_all_ip_args *all_ip, struct foe_entry *entry,
+	int hash_index, HWNAT_ENTRY_TYPE type, int ppe)
+{
+	int j = 0;
+	int find_sip = 0, find_dip = 0;
+	unsigned long pkt_cnt = 0, byte_cnt = 0;
+
+	if (!ppe)
+		ppe_mib_dump_ppe0(hash_index, &pkt_cnt, &byte_cnt);
+	else
+		ppe_mib_dump_ppe1(hash_index, &pkt_cnt, &byte_cnt);
+
+	if (debug_level >= 5)
+		pr_notice("%s, idx=%d, type=%d, pkt_cnt=%ld, byte_cnt=%ld.\n", __func__,
+			hash_index, type, pkt_cnt, byte_cnt);
+
+	for (j = 0; j < all_ip->entry_num; j++) {
+		switch (type) {
+		case LAN_TO_LAN:
+			if (entry->ipv4_hnapt.sip == all_ip->entries[j].ip.ipv4_addr) {
+				find_sip = 1;
+				all_ip->entries[j].tx_bytes += byte_cnt;
+				all_ip->entries[j].tx_packets += pkt_cnt;
+				if (debug_level >= 5)
+					pr_notice("find_sip=%x tx_bytes=%ld, tx_packets=%ld\n",
+						entry->ipv4_hnapt.sip,
+						all_ip->entries[j].tx_bytes,
+						all_ip->entries[j].tx_packets);
+			}
+			if (entry->ipv4_hnapt.dip == all_ip->entries[j].ip.ipv4_addr) {
+				find_dip = 1;
+				all_ip->entries[j].rx_bytes += byte_cnt;
+				all_ip->entries[j].rx_packets += pkt_cnt;
+				if (debug_level >= 5)
+					pr_notice("find_dip=%x rx_bytes=%ld, rx_packets=%ld\n",
+						entry->ipv4_hnapt.dip,
+						all_ip->entries[j].rx_bytes,
+						all_ip->entries[j].rx_packets);
+			}
+			break;
+		case LAN_TO_WAN:
+			if (entry->ipv4_hnapt.sip == all_ip->entries[j].ip.ipv4_addr) {
+				find_sip = 1;
+				all_ip->entries[j].tx_bytes += byte_cnt;
+				all_ip->entries[j].tx_packets += pkt_cnt;
+				if (debug_level >= 5)
+					pr_notice("find_sip=%x tx_bytes=%ld, tx_packets=%ld\n",
+						entry->ipv4_hnapt.sip,
+						all_ip->entries[j].tx_bytes,
+						all_ip->entries[j].tx_packets);
+			}
+			break;
+		case WAN_TO_LAN:
+			if (entry->ipv4_hnapt.new_dip == all_ip->entries[j].ip.ipv4_addr) {
+				find_dip = 1;
+				all_ip->entries[j].rx_bytes += byte_cnt;
+				all_ip->entries[j].rx_packets += pkt_cnt;
+				if (debug_level >= 5)
+					pr_notice("find_newdip=%x rx_bytes=%ld, rx_packets=%ld\n",
+						entry->ipv4_hnapt.new_dip,
+						all_ip->entries[j].rx_bytes,
+						all_ip->entries[j].rx_packets);
+			}
+			break;
+		default:
+			return HWNAT_FAIL;
+		}
+
+		/* loop end: sip and dip each appear at most once in the ip table */
+		if (LAN_TO_LAN == type) {
+			if (find_sip && find_dip)
+				break;
+		} else if (LAN_TO_WAN == type) {
+			if (find_sip)
+				break;
+		} else if (WAN_TO_LAN == type) {
+			if (find_dip)
+				break;
+		}
+	}
+
+	if (debug_level >= 5)
+		pr_notice("j=%d find_sip=%d, find_dip=%d\n", j, find_sip, find_dip);
+
+	if ((LAN_TO_LAN == type || LAN_TO_WAN == type) && !find_sip) {
+		/* check capacity before writing the new sip tx entry */
+		if (all_ip->entry_num >= HWNAT_MAX_MIB_IP_ENTRY_NUM) {
+			pr_notice("ip stats table more than max number, fail.\n");
+			return HWNAT_FAIL;
+		}
+		all_ip->entries[all_ip->entry_num].ip.ipv4_addr = entry->ipv4_hnapt.sip;
+		all_ip->entries[all_ip->entry_num].tx_bytes = byte_cnt;
+		all_ip->entries[all_ip->entry_num].tx_packets = pkt_cnt;
+		all_ip->entries[all_ip->entry_num].is_ipv4 = 1;
+		if (debug_level >= 5)
+			pr_notice("not find sip type=%d, insert sip=%x tx_bytes=%ld, tx_packets=%ld\n",
+				type, entry->ipv4_hnapt.sip,
+				all_ip->entries[all_ip->entry_num].tx_bytes,
+				all_ip->entries[all_ip->entry_num].tx_packets);
+		all_ip->entry_num++;
+	}
+
+	if (!find_dip) {
+		if (LAN_TO_LAN == type) {
+			if (all_ip->entry_num >= HWNAT_MAX_MIB_IP_ENTRY_NUM) {
+				pr_notice("ip stats table more than max number, fail.\n");
+				return HWNAT_FAIL;
+			}
+			all_ip->entries[all_ip->entry_num].ip.ipv4_addr = entry->ipv4_hnapt.dip;
+			all_ip->entries[all_ip->entry_num].rx_bytes = byte_cnt;
+			all_ip->entries[all_ip->entry_num].rx_packets = pkt_cnt;
+			all_ip->entries[all_ip->entry_num].is_ipv4 = 1;
+			if (debug_level >= 5)
+				pr_notice("not find dip, type=%d, insert dip=%x rx_bytes=%ld, rx_packets=%ld\n",
+					type, entry->ipv4_hnapt.dip,
+					all_ip->entries[all_ip->entry_num].rx_bytes,
+					all_ip->entries[all_ip->entry_num].rx_packets);
+			all_ip->entry_num++;
+		} else if (WAN_TO_LAN == type) {
+			if (all_ip->entry_num >= HWNAT_MAX_MIB_IP_ENTRY_NUM) {
+				pr_notice("ip stats table more than max number, fail.\n");
+				return HWNAT_FAIL;
+			}
+			all_ip->entries[all_ip->entry_num].ip.ipv4_addr = entry->ipv4_hnapt.new_dip;
+			all_ip->entries[all_ip->entry_num].rx_bytes = byte_cnt;
+			all_ip->entries[all_ip->entry_num].rx_packets = pkt_cnt;
+			all_ip->entries[all_ip->entry_num].is_ipv4 = 1;
+			if (debug_level >= 5)
+				pr_notice("not find dip, type=%d, insert new dip=%x rx_bytes=%ld, rx_packets=%ld\n",
+					type, entry->ipv4_hnapt.new_dip,
+					all_ip->entries[all_ip->entry_num].rx_bytes,
+					all_ip->entries[all_ip->entry_num].rx_packets);
+			all_ip->entry_num++;
+		}
+	}
+
+	return HWNAT_SUCCESS;
+}
+
+static int update_mib_cnt_ipv6(struct hwnat_mib_all_ip_args *all_ip, struct foe_entry *entry,
+	int hash_index, int ppe)
+{
+	int j = 0;
+	int find_sip = 0, find_dip = 0;
+	unsigned long pkt_cnt = 0, byte_cnt = 0;
+
+	if (!ppe)
+		ppe_mib_dump_ppe0(hash_index, &pkt_cnt, &byte_cnt);
+	else
+		ppe_mib_dump_ppe1(hash_index, &pkt_cnt, &byte_cnt);
+
+	if (debug_level >= 5)
+		pr_notice("%s, idx=%d, pkt_cnt=%ld, byte_cnt=%ld.\n", __func__,
+			hash_index, pkt_cnt, byte_cnt);
+
+	for (j = 0; j < all_ip->entry_num; j++) {
+		if (entry->ipv6_5t_route.ipv6_sip0 == all_ip->entries[j].ip.ipv6_addr[0] &&
+		    entry->ipv6_5t_route.ipv6_sip1 == all_ip->entries[j].ip.ipv6_addr[1] &&
+		    entry->ipv6_5t_route.ipv6_sip2 == all_ip->entries[j].ip.ipv6_addr[2] &&
+		    entry->ipv6_5t_route.ipv6_sip3 == all_ip->entries[j].ip.ipv6_addr[3]) {
+			find_sip = 1;
+			all_ip->entries[j].tx_bytes += byte_cnt;
+			all_ip->entries[j].tx_packets += pkt_cnt;
+			if (debug_level >= 5)
+				pr_notice("find_SIPv6=%08X:%08X:%08X:%08X tx_bytes=%ld, tx_packets=%ld\n",
+					entry->ipv6_5t_route.ipv6_sip0, entry->ipv6_5t_route.ipv6_sip1,
+					entry->ipv6_5t_route.ipv6_sip2, entry->ipv6_5t_route.ipv6_sip3,
+					all_ip->entries[j].tx_bytes, all_ip->entries[j].tx_packets);
+		}
+
+		if (entry->ipv6_5t_route.ipv6_dip0 == all_ip->entries[j].ip.ipv6_addr[0] &&
+		    entry->ipv6_5t_route.ipv6_dip1 == all_ip->entries[j].ip.ipv6_addr[1] &&
+		    entry->ipv6_5t_route.ipv6_dip2 == all_ip->entries[j].ip.ipv6_addr[2] &&
+		    entry->ipv6_5t_route.ipv6_dip3 == all_ip->entries[j].ip.ipv6_addr[3]) {
+			find_dip = 1;
+			all_ip->entries[j].rx_bytes += byte_cnt;
+			all_ip->entries[j].rx_packets += pkt_cnt;
+			if (debug_level >= 5)
+				pr_notice("find_DIPv6=%08X:%08X:%08X:%08X rx_bytes=%ld, rx_packets=%ld\n",
+					entry->ipv6_5t_route.ipv6_dip0, entry->ipv6_5t_route.ipv6_dip1,
+					entry->ipv6_5t_route.ipv6_dip2, entry->ipv6_5t_route.ipv6_dip3,
+					all_ip->entries[j].rx_bytes, all_ip->entries[j].rx_packets);
+		}
+
+		if (find_sip && find_dip)
+			break;
+	}
+
+	if (debug_level >= 5)
+		pr_notice("j=%d find_sip=%d, find_dip=%d\n", j, find_sip, find_dip);
+
+	if (!find_sip) {
+		if (all_ip->entry_num >= HWNAT_MAX_MIB_IP_ENTRY_NUM) {
+			pr_notice("ip stats table more than max number, fail.\n");
+			return HWNAT_FAIL;
+		}
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[0] = entry->ipv6_5t_route.ipv6_sip0;
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[1] = entry->ipv6_5t_route.ipv6_sip1;
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[2] = entry->ipv6_5t_route.ipv6_sip2;
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[3] = entry->ipv6_5t_route.ipv6_sip3;
+		all_ip->entries[all_ip->entry_num].tx_bytes = byte_cnt;
+		all_ip->entries[all_ip->entry_num].tx_packets = pkt_cnt;
+		if (debug_level >= 5)
+			pr_notice("not find sip insert SIPv6=%08X:%08X:%08X:%08X tx_bytes=%ld, tx_packets=%ld\n",
+				entry->ipv6_5t_route.ipv6_sip0, entry->ipv6_5t_route.ipv6_sip1,
+				entry->ipv6_5t_route.ipv6_sip2, entry->ipv6_5t_route.ipv6_sip3,
+				all_ip->entries[all_ip->entry_num].tx_bytes,
+				all_ip->entries[all_ip->entry_num].tx_packets);
+		all_ip->entry_num++;
+	}
+
+	if (!find_dip) {
+		if (all_ip->entry_num >= HWNAT_MAX_MIB_IP_ENTRY_NUM) {
+			pr_notice("ip stats table more than max number, fail.\n");
+			return HWNAT_FAIL;
+		}
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[0] = entry->ipv6_5t_route.ipv6_dip0;
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[1] = entry->ipv6_5t_route.ipv6_dip1;
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[2] = entry->ipv6_5t_route.ipv6_dip2;
+		all_ip->entries[all_ip->entry_num].ip.ipv6_addr[3] = entry->ipv6_5t_route.ipv6_dip3;
+		all_ip->entries[all_ip->entry_num].rx_bytes = byte_cnt;
+		all_ip->entries[all_ip->entry_num].rx_packets = pkt_cnt;
+		if (debug_level >= 5)
+			pr_notice("not find dip, insert DIPv6=%08X:%08X:%08X:%08X rx_bytes=%ld, rx_packets=%ld\n",
+				entry->ipv6_5t_route.ipv6_dip0, entry->ipv6_5t_route.ipv6_dip1,
+				entry->ipv6_5t_route.ipv6_dip2, entry->ipv6_5t_route.ipv6_dip3,
+				all_ip->entries[all_ip->entry_num].rx_bytes,
+				all_ip->entries[all_ip->entry_num].rx_packets);
+		all_ip->entry_num++;
+	}
+
+	return HWNAT_SUCCESS;
+}
+
+static int update_mib_cnt_ip_by_ppe(struct hwnat_mib_all_ip_args *all_ip, int hash_index, int ppe)
+{
+	struct foe_entry *entry;
+
+	if (!ppe)
+		entry = &ppe_foe_base[hash_index];
+	else
+		entry = &ppe1_foe_base[hash_index];
+
+	if (entry->bfib1.state == BIND) {
+		if (IS_IPV4_HNAPT(entry)) {
+			if (debug_level >= 5)
+				pr_notice("sip=%x, dip=%x, newsip=%x, newdip=%x.\n",
+					entry->ipv4_hnapt.sip, entry->ipv4_hnapt.dip,
+					entry->ipv4_hnapt.new_sip, entry->ipv4_hnapt.new_dip);
+			if (entry->ipv4_hnapt.sip == entry->ipv4_hnapt.new_sip &&
+			    entry->ipv4_hnapt.dip == entry->ipv4_hnapt.new_dip) {
+				if (HWNAT_FAIL == update_mib_cnt_ip(all_ip, entry, hash_index, LAN_TO_LAN, ppe))
+					return HWNAT_FAIL;
+			} else if (entry->ipv4_hnapt.sip != entry->ipv4_hnapt.new_sip) {
+				if (HWNAT_FAIL == update_mib_cnt_ip(all_ip, entry, hash_index, LAN_TO_WAN, ppe))
+					return HWNAT_FAIL;
+			} else if (entry->ipv4_hnapt.dip != entry->ipv4_hnapt.new_dip) {
+				if (HWNAT_FAIL == update_mib_cnt_ip(all_ip, entry, hash_index, WAN_TO_LAN, ppe))
+					return HWNAT_FAIL;
+			}
+		} else if (IS_IPV6_5T_ROUTE(entry)) {
+			if (HWNAT_FAIL == update_mib_cnt_ipv6(all_ip, entry, hash_index, ppe))
+				return HWNAT_FAIL;
+		}
+	}
+	return HWNAT_SUCCESS;
+}
+
+int get_ppe_mib_ip(struct hwnat_mib_all_ip_args *all_ip)
+{
+	int hash_index;
+
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		if (HWNAT_FAIL == update_mib_cnt_ip_by_ppe(all_ip, hash_index, 0))
+			return HWNAT_FAIL;
+		if (HWNAT_FAIL == update_mib_cnt_ip_by_ppe(all_ip, hash_index, 1))
+			return HWNAT_FAIL;
+	}
+	return HWNAT_SUCCESS;
+}
+EXPORT_SYMBOL(get_ppe_mib_ip);
+
+int ppe_get_agcnt(struct hwnat_ac_args *opt3)
+{
+	unsigned int ag_idx = 0;
+
+	ag_idx = opt3->ag_index;
+	if (ag_idx > 63)
+		return HWNAT_FAIL;
+
+#ifdef CONFIG_RA_HW_NAT_ACCNT_MAINTAINER
+	ac_info[ag_idx].ag_byte_cnt += reg_read(AC_BASE + ag_idx * 16);	/* 64-bit byte cnt, low word */
+	ac_info[ag_idx].ag_byte_cnt +=
+	    ((unsigned long long)(reg_read(AC_BASE + ag_idx * 16 + 4)) << 32);
+	ac_info[ag_idx].ag_pkt_cnt += reg_read(AC_BASE + ag_idx * 16 + 8);	/* 32-bit packet cnt */
+	opt3->ag_byte_cnt = ac_info[ag_idx].ag_byte_cnt;
+	opt3->ag_pkt_cnt = ac_info[ag_idx].ag_pkt_cnt;
+#else
+	opt3->ag_byte_cnt = reg_read(AC_BASE + ag_idx * 16);	/* 64-bit byte cnt, low word */
+	opt3->ag_byte_cnt +=
+	    ((unsigned long long)(reg_read(AC_BASE + ag_idx * 16 + 4)) << 32);
+	opt3->ag_pkt_cnt = reg_read(AC_BASE + ag_idx * 16 + 8);	/* 32-bit packet cnt */
+#endif
+	return HWNAT_SUCCESS;
+}
+
+int ppe_set_bind_threshold(uint32_t threshold)
+{
+	/* Set reach bind rate for unbind state */
+	reg_write(PPE_FOE_BNDR, threshold);
+	reg_write(PPE1_FOE_BNDR, threshold);
+
+	return HWNAT_SUCCESS;
+}
+
+int ppe_set_max_entry_limit(u32 full, u32 half, u32 quarter)
+{
+	/* Allowed max entries to be built during a time-stamp unit */
+
+	/* smaller than 1/4 of total entries */
+	reg_modify_bits(PPE_FOE_LMT1, quarter, 0, 14);
+
+	/* between 1/2 and 1/4 of total entries */
+	reg_modify_bits(PPE_FOE_LMT1, half, 16, 14);
+
+	/* between full and 1/2 of total entries */
+	reg_modify_bits(PPE_FOE_LMT2, full, 0, 14);
+
+	return HWNAT_SUCCESS;
+}
+
+int ppe_set_ka_interval(u8 tcp_ka, u8 udp_ka)
+{
+	/* Keep alive time for bind FOE TCP entry */
+	reg_modify_bits(PPE_FOE_KA, tcp_ka, 16, 8);
+
+	/* Keep alive timer for bind FOE UDP entry */
+	reg_modify_bits(PPE_FOE_KA, udp_ka, 24, 8);
+
+	return HWNAT_SUCCESS;
+}
+
+int ppe_set_464_enable(int enable)
+{
+	u32 ppe_flow_set = reg_read(PPE_FLOW_SET);
+
+	if (enable)
+		ppe_flow_set |= BIT_IPV4_464XLAT_EN;
+	else
+		ppe_flow_set &= ~BIT_IPV4_464XLAT_EN;
+
+	reg_write(PPE_FLOW_SET, ppe_flow_set);
+	reg_write(PPE1_FLOW_SET, ppe_flow_set);
+
+	return HWNAT_SUCCESS;
+}
+
+int ppe_set_unbind_lifetime(uint8_t lifetime)
+{
+	/* set delta time for aging out an unbound FOE entry */
+	reg_modify_bits(PPE_FOE_UNB_AGE, lifetime, 0, 8);
+	reg_modify_bits(PPE1_FOE_UNB_AGE, lifetime, 0, 8);
+
+	return HWNAT_SUCCESS;
+}
+
+int ppe_set_bind_lifetime(u16 tcp_life, u16 udp_life, u16 fin_life)
+{
+	/* set delta time for aging out a bound UDP FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE0, udp_life, 0, 16);
+	reg_modify_bits(PPE1_FOE_BND_AGE0, udp_life, 0, 16);
+
+	/* set delta time for aging out a bound TCP FIN FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE1, fin_life, 16, 16);
+	reg_modify_bits(PPE1_FOE_BND_AGE1, fin_life, 16, 16);
+
+	/* set delta time for aging out a bound TCP FOE entry */
+	reg_modify_bits(PPE_FOE_BND_AGE1, tcp_life, 0, 16);
+	reg_modify_bits(PPE1_FOE_BND_AGE1, tcp_life, 0, 16);
+
+	return HWNAT_SUCCESS;
+}
+
+int ppe_tbl_clear(void)
+{
+	u32 foe_tbl_size = FOE_4TB_SIZ * sizeof(struct foe_entry);
+
+	reg_modify_bits(PPE_FOE_CFG, ONLY_FWD_CPU, 4, 2);
+	memset(ppe_foe_base, 0, foe_tbl_size);
+	ppe_set_cache_ebl();	/* clear HWNAT cache */
+	reg_modify_bits(PPE_FOE_CFG, FWD_CPU_BUILD_ENTRY, 4, 2);
+
+	reg_modify_bits(PPE1_FOE_CFG, ONLY_FWD_CPU, 4, 2);
+	memset(ppe1_foe_base, 0, foe_tbl_size);
+	ppe_set_cache_ebl();	/* clear HWNAT cache */
+	reg_modify_bits(PPE1_FOE_CFG, FWD_CPU_BUILD_ENTRY, 4, 2);
+
+	return HWNAT_SUCCESS;
+}
+
+void dump_dport(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_IF_NUM; i++) {
+		if (dst_port[i] != NULL)
+			pr_notice("dst_port[%d] = %s\n", i, dst_port[i]->name);
+	}
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ioctl.h b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ioctl.h
new file mode 100644
index 0000000..f856c22
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ioctl.h
@@ -0,0 +1,254 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef	__HW_NAT_IOCTL_H__
+#define	__HW_NAT_IOCTL_H__
+
+#include "hwnat_config.h"
+
+extern unsigned int hnat_chip_name;
+extern unsigned int fe_feature;
+extern struct net_device	*dst_port[64];
+extern unsigned int debug_PPP;
+
+#define HW_NAT_DUMP_CACHE_ENTRY		(0x24)
+#define HW_NAT_ADD_ENTRY		(0x01)
+#define HW_NAT_DEL_ENTRY		(0x38)
+#define HW_NAT_DUMP_ENTRY		(0x03)
+#define HW_NAT_GET_ALL_ENTRIES		(0x04)
+#define HW_NAT_BIND_ENTRY		(0x05)
+#define HW_NAT_UNBIND_ENTRY		(0x06)
+#define HW_NAT_INVALID_ENTRY		(0x07)
+#define HW_NAT_DEBUG			(0x08)
+
+#define HW_NAT_DROP_ENTRY		(0x36)
+#define HW_NAT_TBL_CLEAR		(0x37)
+
+#define HW_NAT_GET_AC_CNT		(0x09)
+#define HW_NAT_BIND_THRESHOLD		(0x16)
+#define HW_NAT_MAX_ENTRY_LMT		(0x17)
+#define HW_NAT_RULE_SIZE		(0x18)
+#define HW_NAT_KA_INTERVAL		(0x19)
+#define HW_NAT_UB_LIFETIME		(0x1A)
+#define HW_NAT_BIND_LIFETIME		(0x1B)
+#define HW_NAT_BIND_DIRECTION		(0x1C)
+#define HW_NAT_VLAN_ID			(0x1D)
+#define HW_NAT_MCAST_INS		(0x20)
+#define HW_NAT_MCAST_DEL		(0x21)
+#define HW_NAT_MCAST_DUMP		(0x22)
+#define HW_NAT_MIB_DUMP			(0x23)
+#define HW_NAT_MIB_DRAM_DUMP		(0x25)
+#define HW_NAT_MIB_GET			(0x26)
+#define HW_NAT_IPI_CTRL_FROM_EXTIF	(0x50)
+#define HW_NAT_IPI_CTRL_FROM_PPEHIT	(0x51)
+#define HW_NAT_DPORT			(0x52)
+#define HW_NAT_DEVNAME			"hwnat0"
+#define HW_NAT_MAJOR			(220)
+
+/* extern struct hwnat_ac_args ac_info[64]; */
+extern struct mib_entry *ppe_mib_base;
+enum hwnat_status {
+	HWNAT_SUCCESS = 0,
+	HWNAT_FAIL = 1,
+	HWNAT_ENTRY_NOT_FOUND = 2
+};
+
+struct hwnat_tuple {
+	unsigned short hash_index;
+	unsigned int pkt_type;
+	unsigned int is_udp;
+	unsigned int fport;
+	unsigned int fqos;
+	unsigned int qid;
+	/* egress layer2 */
+	unsigned char dmac[6];
+	unsigned char smac[6];
+	unsigned short vlan1;
+	unsigned short vlan2;
+	unsigned short pppoe_id;
+
+	/* ingress layer3 */
+	unsigned int ing_sipv4;
+	unsigned int ing_dipv4;
+
+	unsigned int ing_sipv6_0;
+	unsigned int ing_sipv6_1;
+	unsigned int ing_sipv6_2;
+	unsigned int ing_sipv6_3;
+
+	unsigned int ing_dipv6_0;
+	unsigned int ing_dipv6_1;
+	unsigned int ing_dipv6_2;
+	unsigned int ing_dipv6_3;
+
+	/* egress layer3 */
+	unsigned int eg_sipv4;
+	unsigned int eg_dipv4;
+
+	unsigned int eg_sipv6_0;
+	unsigned int eg_sipv6_1;
+	unsigned int eg_sipv6_2;
+	unsigned int eg_sipv6_3;
+
+	unsigned int eg_dipv6_0;
+	unsigned int eg_dipv6_1;
+	unsigned int eg_dipv6_2;
+	unsigned int eg_dipv6_3;
+	unsigned char prot;
+	/*ingress layer4*/
+	unsigned short ing_sp;
+	unsigned short ing_dp;
+
+	/*egress layer4*/
+	unsigned short eg_sp;
+	unsigned short eg_dp;
+
+	unsigned char	ipv6_flowlabel;
+	unsigned char   pppoe_act;
+	unsigned int    vlan_layer;
+	unsigned char	dst_port;
+	unsigned int    dscp;
+	enum hwnat_status result;
+};
+
+struct hwnat_args {
+	enum hwnat_status result;
+	unsigned int entry_num;
+	unsigned int num_of_entries;
+	unsigned int debug;
+	unsigned int entry_state;	/* invalid=0, unbind=1, bind=2, fin=3 */
+	struct hwnat_tuple entries[];
+};
+
+/*hnat qos*/
+struct hwnat_qos_args {
+	unsigned int enable:1;
+	unsigned int up:3;
+	unsigned int weight:3;	/*UP resolution */
+	unsigned int dscp:6;
+	unsigned int dscp_set:3;
+	unsigned int vpri:3;
+	unsigned int ac:2;
+	unsigned int mode:2;
+	unsigned int weight0:4;	/*WRR 4 queue weight */
+	unsigned int weight1:4;
+	unsigned int weight2:4;
+	unsigned int weight3:4;
+	enum hwnat_status result;
+};
+
+/*hnat config*/
+struct hwnat_config_args {
+	unsigned int bind_threshold:16;
+	unsigned int foe_full_lmt:14;
+	unsigned int foe_half_lmt:14;
+	unsigned int foe_qut_lmt:14;
+	unsigned int pre_acl:9;
+	unsigned int pre_meter:9;
+	unsigned int pre_ac:9;
+	unsigned int post_meter:9;
+	unsigned int post_ac:9;
+	unsigned int foe_tcp_ka:8;	/*unit 4 sec */
+	unsigned int foe_udp_ka:8;	/*unit 4 sec */
+	unsigned int foe_unb_dlta:8;	/*unit 1 sec */
+	unsigned int foe_tcp_dlta:16;	/*unit 1 sec */
+	unsigned int foe_udp_dlta:16;	/*unit 1 sec */
+	unsigned int foe_fin_dlta:16;	/*unit 1 sec */
+	unsigned int wan_vid:16;
+	unsigned int lan_vid:16;
+	unsigned int bind_dir:2;	/* 0=upstream, 1=downstream, 2=bi-direction */
+	enum hwnat_status result;
+};
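+
+/*
+ * Worked example (illustrative): HW_NAT_KA_INTERVAL consumes this struct,
+ * and the keep-alive fields are in 4-second units, so the call below asks
+ * the PPE to refresh bound TCP entries every 20 s and UDP entries every
+ * 12 s (fd is a placeholder descriptor for /dev/hwnat0):
+ *
+ *	struct hwnat_config_args cfg = { .foe_tcp_ka = 5, .foe_udp_ka = 3 };
+ *
+ *	ioctl(fd, HW_NAT_KA_INTERVAL, &cfg);
+ */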
+
+struct hwnat_ac_args {
+	unsigned int ag_index;
+	unsigned long long ag_byte_cnt;
+	unsigned long long ag_pkt_cnt;
+	enum hwnat_status result;
+};
+
+extern struct hwnat_ac_args ac_info[64];
+
+struct hwnat_mcast_args {
+	unsigned int    mc_vid:16;
+	unsigned int    mc_px_en:4;
+	unsigned int    valid:1;
+	unsigned int    rev2:3;
+	unsigned int    mc_px_qos_en:4;
+	unsigned int    mc_qos_qid:4;
+	unsigned char	dst_mac[6];
+};
+
+struct hwnat_mib_args {
+	unsigned int    entry_num:16;
+};
+
+struct hwnat_ipi_args {
+	unsigned int hnat_ipi_enable;
+	unsigned int drop_pkt;
+	unsigned int queue_thresh;
+	unsigned int ipi_cnt_mod;
+};
+
+int ppe_reg_ioctl_handler(void);
+void ppe_unreg_ioctl_handler(void);
+int ppe_get_agcnt(struct hwnat_ac_args *opt3);
+int reply_entry_idx(struct hwnat_tuple *opt, unsigned int entry_num);
+void ppe_mib_dump(unsigned int entry_num, unsigned long *pkt_cnt, unsigned long *byte_cnt);
+void ppe_mib_dram_dump(unsigned int entry_num);
+int get_ppe_mib(struct hwnat_tuple *opt, unsigned long *tx_pkt_cnt, unsigned long *tx_byte_cnt,
+		unsigned long *rx_pkt_cnt, unsigned long *rx_byte_cnt);
+int ppe_tbl_clear(void);
+void dump_dport(void);
+int ppe_set_ka_interval(unsigned char tcp_ka, unsigned char udp_ka);
+int ppe_set_464_enable(int enable);
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ipi.c b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ipi.c
new file mode 100755
index 0000000..e19bba5
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/hwnat_ipi.c
@@ -0,0 +1,859 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu_rmap.h>
+#include "ra_nat.h"
+#include "foe_fdb.h"
+hnat_ipi_s *hnat_ipi_from_extif[NR_CPUS] ____cacheline_aligned_in_smp;
+hnat_ipi_s *hnat_ipi_from_ppehit[NR_CPUS] ____cacheline_aligned_in_smp;
+hnat_ipi_stat *hnat_ipi_status[NR_CPUS] ____cacheline_aligned_in_smp;
+/* hnat_ipi_cfg hnat_ipi_config_ctx ____cacheline_aligned_in_smp; */
+hnat_ipi_cfg *hnat_ipi_config;/* = &hnat_ipi_config_ctx; */
+
+unsigned int dbg_var;
+unsigned int dbg_var2;
+struct timer_list ipi_monitor_timer_from_extif;
+struct timer_list ipi_monitor_timer_from_ppehit;
+
+int skb_get_rxhash_ipi(struct sk_buff *skb, u32 hflag)
+{
+	struct rps_dev_flow voidflow, *rflow = &voidflow;
+	int cpu;
+	unsigned char *old_hdr, *old_data;
+	unsigned short old_proto;
+
+	preempt_disable();
+	rcu_read_lock();
+#if defined(CONFIG_RAETH_QDMA)
+	if (hflag & HNAT_IPI_HASH_VTAG) {
+		struct vlan_ethhdr *veth;
+		u16 vir_if_idx;
+
+		/* veth = (struct vlan_ethhdr *)LAYER2_HEADER(skb); */
+		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+		/* unexpected: not an 802.1Q-tagged frame */
+		if (veth->h_vlan_proto != htons(ETH_P_8021Q))
+			ipidbg[smp_processor_id()][6]++;
+
+		vir_if_idx = ntohs(veth->h_vlan_TCI);
+#if defined(CONFIG_ARCH_MT7622)
+		skb->hash = ((u32)vir_if_idx) << (32 - FOE_4TB_BIT);
+		skb->l4_hash = 1;
+#else
+		skb->rxhash = ((u32)vir_if_idx) << (32 - FOE_4TB_BIT);
+		skb->l4_rxhash = 1;
+#endif
+		old_data = skb->data;
+		skb->data += 4;
+		old_proto = skb->protocol;
+		skb->protocol = (*(u16 *)(skb->data - 2));
+	}
+#endif
+	/* old_hdr = skb->network_header; */
+
+	old_hdr = skb_network_header(skb);
+	/* old_hdr = skb->data; */
+	if (debug_level >= 2) {
+		pr_info("00 : skb->head = %p\n", skb->head);
+		pr_info("00 : skb->data = %p\n", skb->data);
+		pr_info("00 : skb->mac_header = %d\n", skb->mac_header);
+		pr_info("00 : skb->network_header = %d\n", skb->network_header);
+		pr_info("00 : old_hdr = %p\n", old_hdr);
+	}
+	cpu = get_rps_cpu(skb->dev, skb, &rflow);
+	if (debug_level >= 2) {
+		pr_info("11 : skb->head = %p\n", skb->head);
+		pr_info("11 : skb->data = %p\n", skb->data);
+		pr_info("11 : skb->mac_header = %d\n", skb->mac_header);
+		pr_info("11 : skb->network_header = %d\n", skb->network_header);
+		pr_info("11 : old_hdr = %p\n", old_hdr);
+	}
+	if (cpu < 0) {
+		cpu = smp_processor_id();
+		if (hflag & HNAT_IPI_HASH_FROM_EXTIF)
+			ipidbg[cpu][3]++;
+		else
+			ipidbg2[cpu][3]++;
+	}
+#if defined(CONFIG_RAETH_QDMA)
+	if (hflag & HNAT_IPI_HASH_VTAG) {
+		skb->data = old_data;
+		skb->protocol = old_proto;
+	}
+#endif
+	/* skb->network_header = old_hdr; */
+
+	skb_set_network_header(skb, (int)(old_hdr - skb->data));
+	if (debug_level >= 2) {
+		pr_info("22 : skb->head = %p\n", skb->head);
+		pr_info("22 : skb->data = %p\n", skb->data);
+		pr_info("22 : skb->mac_header = %d\n", skb->mac_header);
+		pr_info("22 : skb->network_header = %d\n", skb->network_header);
+		pr_info("22 : old_hdr = %p\n", old_hdr);
+	}
+	rcu_read_unlock();
+	preempt_enable();
+	return cpu;
+}
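+
+/* Steering note for the hashing above: in normal mode the target CPU comes
+ * from the RPS flow table via get_rps_cpu(); with HNAT_IPI_HASH_VTAG the
+ * FOE entry index carried in the VLAN TCI is shifted into the top bits of
+ * skb->hash, so every packet of a given hardware-NAT flow lands on the
+ * same CPU.
+ */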
+
+void smp_func_call_BH_handler_from_extif(unsigned long data)
+{
+	struct sk_buff *skb_deq;
+	unsigned int  cpu_num = smp_processor_id();
+	unsigned int re_schedule_cnt = 0;
+	unsigned int b_reschedule = 0;
+	struct hnat_ipi_s *phnat_ipi = hnat_ipi_from_extif[cpu_num];
+	struct hnat_ipi_stat *phnat_ipi_status = hnat_ipi_status[cpu_num];
+
+	atomic_set(&phnat_ipi_status->cpu_status_from_extif, 1);
+#if defined(HNAT_IPI_DQ)
+	while (skb_queue_len(&phnat_ipi->skb_process_queue) > 0) {
+#elif defined(HNAT_IPI_RXQUEUE)
+	/* spin_lock(&phnat_ipi->ipilock); */
+	while (atomic_read(&phnat_ipi->rx_queue_num) > 0) {
+#else
+	while ((skb_queue_len(&phnat_ipi->skb_ipi_queue) > 0) && (hnat_ipi_config->enable_from_extif == 1)) {
+#endif
+
+#if defined(HNAT_IPI_DQ)
+		skb_deq = __skb_dequeue(&phnat_ipi->skb_process_queue);
+#elif defined(HNAT_IPI_RXQUEUE)
+		skb_deq = phnat_ipi->rx_queue[phnat_ipi->rx_queue_ridx];
+		phnat_ipi->rx_queue[phnat_ipi->rx_queue_ridx] = NULL;
+		phnat_ipi->rx_queue_ridx = (phnat_ipi->rx_queue_ridx + 1) % 1024;
+		atomic_sub(1, &phnat_ipi->rx_queue_num);
+#else
+		skb_deq = skb_dequeue(&phnat_ipi->skb_ipi_queue);
+#endif
+		if (skb_deq) {
+			ipidbg[cpu_num][8]++;
+			ppe_extif_rx_handler(skb_deq);
+		} else {
+			break;
+		}
+		re_schedule_cnt++;
+		if (re_schedule_cnt > hnat_ipi_config->queue_thresh_from_extif) {
+			ipidbg[cpu_num][9]++;
+			b_reschedule = 1;
+			break;
+		}
+	}
+#if defined(HNAT_IPI_DQ)
+	spin_lock(&phnat_ipi->ipilock);
+	if (skb_queue_len(&phnat_ipi->skb_process_queue) == 0) {
+		unsigned int qlen = skb_queue_len(&phnat_ipi->skb_input_queue);
+
+		if (qlen)
+			skb_queue_splice_tail_init(&phnat_ipi->skb_input_queue,
+						   &phnat_ipi->skb_process_queue);
+	}
+	spin_unlock(&phnat_ipi->ipilock);
+#endif
+#ifdef HNAT_IPI_RXQUEUE
+	/* spin_unlock(&phnat_ipi->ipilock); */
+#endif
+
+	/* atomic_set(&phnat_ipi_status->cpu_status_from_extif, 0); */
+	if (b_reschedule == 1)
+		tasklet_hi_schedule(&phnat_ipi->smp_func_call_tsk);
+	else
+		atomic_set(&phnat_ipi_status->cpu_status_from_extif, 0);
+}
+
+static void smp_func_call_from_extif(void *info)
+{
+	unsigned int cpu = smp_processor_id();
+	hnat_ipi_s *phnat_ipi = hnat_ipi_from_extif[cpu];
+
+	phnat_ipi->smp_func_call_tsk.data = cpu;
+	ipidbg[cpu][5]++;
+	if ((hnat_ipi_config->enable_from_extif == 1) && (phnat_ipi))
+		tasklet_hi_schedule(&phnat_ipi->smp_func_call_tsk);
+}
+
+void smp_func_call_BH_handler_from_ppehit(unsigned long data)
+{
+	struct sk_buff *skb_deq;
+	unsigned int  cpu_num = smp_processor_id();
+	unsigned int re_schedule_cnt = 0;
+	struct foe_entry *entry;
+	unsigned int b_reschedule = 0;
+	struct hnat_ipi_s *phnat_ipi = hnat_ipi_from_ppehit[cpu_num];
+	struct hnat_ipi_stat *phnat_ipi_status = hnat_ipi_status[cpu_num];
+
+	atomic_set(&phnat_ipi_status->cpu_status_from_ppehit, 1);
+#if defined(HNAT_IPI_DQ)
+	while (skb_queue_len(&phnat_ipi->skb_process_queue) > 0) {
+#elif defined(HNAT_IPI_RXQUEUE)
+	/* spin_lock(&phnat_ipi->ipilock); */
+	while (atomic_read(&phnat_ipi->rx_queue_num) > 0) {
+#else
+	while ((skb_queue_len(&phnat_ipi->skb_ipi_queue) > 0) && (hnat_ipi_config->enable_from_ppehit == 1)) {
+#endif
+#if defined(HNAT_IPI_DQ)
+		skb_deq = __skb_dequeue(&phnat_ipi->skb_process_queue);
+#elif defined(HNAT_IPI_RXQUEUE)
+		skb_deq = phnat_ipi->rx_queue[phnat_ipi->rx_queue_ridx];
+		phnat_ipi->rx_queue[phnat_ipi->rx_queue_ridx] = NULL;
+		phnat_ipi->rx_queue_ridx = (phnat_ipi->rx_queue_ridx + 1) % 1024;
+		atomic_sub(1, &phnat_ipi->rx_queue_num);
+#else
+		skb_deq = skb_dequeue(&phnat_ipi->skb_ipi_queue);
+#endif
+		if (skb_deq) {
+#if defined(CONFIG_RAETH_QDMA)
+			entry = NULL;
+#else
+			entry = &ppe_foe_base[FOE_ENTRY_NUM(skb_deq)];
+#endif
+			hitbind_force_to_cpu_handler(skb_deq, entry);
+		} else {
+			break;
+		}
+
+		re_schedule_cnt++;
+		if (re_schedule_cnt > hnat_ipi_config->queue_thresh_from_ppehit) {
+			ipidbg2[cpu_num][9]++;
+			b_reschedule = 1;
+			break;
+		}
+	}
+
+#if defined(HNAT_IPI_DQ)
+	spin_lock(&phnat_ipi->ipilock);
+	if (skb_queue_len(&phnat_ipi->skb_process_queue) == 0) {
+		unsigned int qlen = skb_queue_len(&phnat_ipi->skb_input_queue);
+
+		if (qlen)
+			skb_queue_splice_tail_init(&phnat_ipi->skb_input_queue,
+						   &phnat_ipi->skb_process_queue);
+	}
+	spin_unlock(&phnat_ipi->ipilock);
+#endif
+#ifdef HNAT_IPI_RXQUEUE
+	/* spin_unlock(&phnat_ipi->ipilock); */
+#endif
+
+	/* atomic_set(&phnat_ipi_status->cpu_status_from_ppehit, 0); */
+	if (b_reschedule == 1)
+		tasklet_hi_schedule(&phnat_ipi->smp_func_call_tsk);
+	else
+		atomic_set(&phnat_ipi_status->cpu_status_from_ppehit, 0);
+}
+
+static void smp_func_call_from_ppehit(void *info)
+{
+	unsigned int cpu = smp_processor_id();
+	struct hnat_ipi_s *phnat_ipi = hnat_ipi_from_ppehit[cpu];
+
+	phnat_ipi->smp_func_call_tsk.data = cpu;
+	ipidbg2[cpu][5]++;
+	if ((hnat_ipi_config->enable_from_ppehit == 1) && phnat_ipi)
+		tasklet_hi_schedule(&phnat_ipi->smp_func_call_tsk);
+}
+
+void sch_smp_call(int is_thecpu, struct hnat_ipi_s *phnat_ipi, unsigned int cpu_num)
+{
+	if (is_thecpu == 1) {
+		tasklet_hi_schedule(&phnat_ipi->smp_func_call_tsk);
+	} else {
+		smp_call_function_single(cpu_num, smp_func_call_from_extif, NULL, 0);
+		phnat_ipi->time_rec = jiffies;
+	}
+}
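+
+/* Kick policy: when the hash picks the local CPU the tasklet is scheduled
+ * directly; otherwise a single non-waiting IPI asks the remote CPU to
+ * schedule its own tasklet. time_rec lets the ipi_monitor_* timers re-kick
+ * a CPU whose queue has stalled.
+ */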
+
+int32_t hnat_ipi_extif_handler(struct sk_buff *skb)
+{
+	struct ethhdr *eth = (struct ethhdr *)(skb->data - ETH_HLEN);
+	unsigned int cpu_num;
+	unsigned int kickoff_ipi = 1;
+	int is_thecpu = 0;
+	struct hnat_ipi_s *phnat_ipi;
+	struct hnat_ipi_stat *phnat_ipi_stat;
+
+	dbg_var++;
+	if (dbg_var == 20)
+		pr_info("=== [FromExtIf]hnat_ipi_enable=%d, queue_thresh=%d, drop_pkt=%d ===\n",
+			hnat_ipi_config->enable_from_extif,
+			hnat_ipi_config->queue_thresh_from_extif,
+			hnat_ipi_config->drop_pkt_from_extif);
+	if (hnat_ipi_config->enable_from_extif == 1) {
+		if (((skb->protocol != htons(ETH_P_8021Q)) &&
+		     (skb->protocol != htons(ETH_P_IP)) &&
+		     (skb->protocol != htons(ETH_P_IPV6)) &&
+		     (skb->protocol != htons(ETH_P_PPP_SES)) &&
+		     (skb->protocol != htons(ETH_P_PPP_DISC))) ||
+		    is_multicast_ether_addr(&eth->h_dest[0]))
+			return 1;
+
+		cpu_num = skb_get_rxhash_ipi(skb, HNAT_IPI_HASH_NORMAL | HNAT_IPI_HASH_FROM_EXTIF);
+		if (debug_level >= 1)
+			pr_info("%s: cpu_num =%d\n", __func__, cpu_num);
+		phnat_ipi_stat = hnat_ipi_status[cpu_num];
+		if (!phnat_ipi_stat)
+			goto DISABLE_EXTIF_IPI;
+		phnat_ipi = hnat_ipi_from_extif[cpu_num];
+		if (!phnat_ipi)
+			goto DISABLE_EXTIF_IPI;
+
+		phnat_ipi_stat->smp_call_cnt_from_extif++;
+		phnat_ipi->ipi_accum++;
+
+		if (phnat_ipi->ipi_accum >= hnat_ipi_config->ipi_cnt_mod_from_extif) {
+			kickoff_ipi = 1;
+			phnat_ipi->ipi_accum = 0;
+		} else {
+			kickoff_ipi = 0;
+		}
+
+		if (cpu_num == smp_processor_id())
+			is_thecpu = 1;
+
+#if defined(HNAT_IPI_DQ)
+		if (skb_queue_len(&phnat_ipi->skb_input_queue) > hnat_ipi_config->drop_pkt_from_extif) {
+#elif defined(HNAT_IPI_RXQUEUE)
+		if (atomic_read(&phnat_ipi->rx_queue_num) >= (hnat_ipi_config->drop_pkt_from_extif - 1)) {
+#else
+		if (skb_queue_len(&phnat_ipi->skb_ipi_queue) > hnat_ipi_config->drop_pkt_from_extif) {
+#endif
+			/* Queue full: drop this packet, but kick the target CPU
+			 * if its bottom half is idle so the backlog drains.
+			 */
+			dev_kfree_skb_any(skb);
+			phnat_ipi_stat->drop_pkt_num_from_extif++;
+			if (atomic_read(&phnat_ipi_stat->cpu_status_from_extif) <= 0)
+				sch_smp_call(is_thecpu, phnat_ipi, cpu_num);
+		} else {
+			/* enqueue for the target CPU's bottom half */
+#if (defined(HNAT_IPI_DQ) || defined(HNAT_IPI_RXQUEUE))
+			spin_lock(&phnat_ipi->ipilock);
+#endif
+#if defined(HNAT_IPI_DQ)
+			__skb_queue_tail(&phnat_ipi->skb_input_queue, skb);
+#elif defined(HNAT_IPI_RXQUEUE)
+			phnat_ipi->rx_queue[phnat_ipi->rx_queue_widx] = skb;
+			phnat_ipi->rx_queue_widx = (phnat_ipi->rx_queue_widx + 1) % 1024;
+			atomic_add(1, &phnat_ipi->rx_queue_num);
+#else
+			skb_queue_tail(&phnat_ipi->skb_ipi_queue, skb);
+#endif
+#if (defined(HNAT_IPI_DQ) || defined(HNAT_IPI_RXQUEUE))
+			spin_unlock(&phnat_ipi->ipilock);
+#endif
+			/* kick only when the accumulation threshold fired and
+			 * the target CPU is not already running its handler
+			 */
+			if ((kickoff_ipi == 1) &&
+			    (atomic_read(&phnat_ipi_stat->cpu_status_from_extif) <= 0))
+				sch_smp_call(is_thecpu, phnat_ipi, cpu_num);
+		}
+		if (debug_level >= 1)
+			pr_info("%s, return 0\n", __func__);
+		return 0;
+	}
+
+DISABLE_EXTIF_IPI:
+	return ppe_extif_rx_handler(skb);
+}
+
+int32_t hnat_ipi_force_cpu(struct sk_buff *skb)
+{
+	unsigned int cpu_num;
+#if defined(CONFIG_RAETH_QDMA)
+	struct foe_entry *entry = NULL;
+#else
+	/* struct foe_entry *entry = &PpeFoeBase[FOE_ENTRY_NUM(skb)]; */
+	struct foe_entry *entry = &ppe_foe_base[FOE_ENTRY_NUM(skb)];
+#endif
+	unsigned int kickoff_ipi = 1;
+	int is_thecpu = 0;
+
+	dbg_var2++;
+	if (dbg_var2 == 20)
+		pr_info("=== [FromPPE]hnat_ipi_enable=%d, queue_thresh=%d, drop_pkt=%d ===\n",
+			hnat_ipi_config->enable_from_ppehit,
+					hnat_ipi_config->queue_thresh_from_ppehit,
+					hnat_ipi_config->drop_pkt_from_ppehit);
+	if (hnat_ipi_config->enable_from_ppehit == 1) {
+		hnat_ipi_s *phnat_ipi;
+		hnat_ipi_stat *phnat_ipi_stat;
+
+		cpu_num = skb_get_rxhash_ipi(skb, HNAT_IPI_HASH_VTAG | HNAT_IPI_HASH_FROM_GMAC);
+		if (debug_level >= 1)
+			pr_info("%s: cpu_num =%d\n", __func__, cpu_num);
+		phnat_ipi = hnat_ipi_from_ppehit[cpu_num];
+		phnat_ipi_stat = hnat_ipi_status[cpu_num];
+		if (!phnat_ipi_stat)
+			goto DISABLE_PPEHIT_IPI;
+
+		if (!phnat_ipi)
+			goto DISABLE_PPEHIT_IPI;
+
+		phnat_ipi_stat->smp_call_cnt_from_ppehit++;
+		phnat_ipi->ipi_accum++;
+
+		if (phnat_ipi->ipi_accum >= hnat_ipi_config->ipi_cnt_mod_from_ppehit) {
+			kickoff_ipi = 1;
+			phnat_ipi->ipi_accum = 0;
+		} else {
+			kickoff_ipi = 0;
+		}
+
+		if (cpu_num == smp_processor_id())
+			is_thecpu = 1;
+
+#if defined(HNAT_IPI_DQ)
+		if (skb_queue_len(&phnat_ipi->skb_input_queue) > hnat_ipi_config->drop_pkt_from_ppehit) {
+#elif defined(HNAT_IPI_RXQUEUE)
+		if (atomic_read(&phnat_ipi->rx_queue_num) >= (hnat_ipi_config->drop_pkt_from_ppehit - 1)) {
+#else
+		if (skb_queue_len(&phnat_ipi->skb_ipi_queue) > hnat_ipi_config->drop_pkt_from_ppehit) {
+#endif
+
+			/* Queue full: drop the packet */
+			dev_kfree_skb_any(skb);
+			phnat_ipi_stat->drop_pkt_num_from_ppehit++;
+			if (atomic_read(&phnat_ipi_stat->cpu_status_from_ppehit) <= 0) {
+				if (is_thecpu == 1)
+					tasklet_hi_schedule(&phnat_ipi->smp_func_call_tsk);
+				else
+					smp_call_function_single(cpu_num, smp_func_call_from_ppehit, NULL, 0);
+				phnat_ipi->time_rec = jiffies;
+			}
+		} else {
+			if (atomic_read(&phnat_ipi_stat->cpu_status_from_ppehit) <= 0) {
+#if (defined(HNAT_IPI_DQ) || defined(HNAT_IPI_RXQUEUE))
+				spin_lock(&phnat_ipi->ipilock);
+#endif
+		      /* idle state */
+#if defined(HNAT_IPI_DQ)
+				__skb_queue_tail(&phnat_ipi->skb_input_queue, skb);
+#elif defined(HNAT_IPI_RXQUEUE)
+				phnat_ipi->rx_queue[phnat_ipi->rx_queue_widx] = skb;
+				phnat_ipi->rx_queue_widx = (phnat_ipi->rx_queue_widx + 1) % 1024;
+				atomic_add(1, &phnat_ipi->rx_queue_num);
+#else
+				skb_queue_tail(&phnat_ipi->skb_ipi_queue, skb);
+#endif
+#if (defined(HNAT_IPI_DQ) || defined(HNAT_IPI_RXQUEUE))
+				spin_unlock(&phnat_ipi->ipilock);
+#endif
+				if (kickoff_ipi == 1) {
+					if (is_thecpu == 1)
+						tasklet_hi_schedule(&phnat_ipi->smp_func_call_tsk);
+					else
+						smp_call_function_single(cpu_num, smp_func_call_from_ppehit, NULL, 0);
+					phnat_ipi->time_rec = jiffies;
+				}
+			} else {
+#if (defined(HNAT_IPI_DQ) || defined(HNAT_IPI_RXQUEUE))
+				spin_lock(&phnat_ipi->ipilock);
+#endif
+#if defined(HNAT_IPI_DQ)
+				__skb_queue_tail(&phnat_ipi->skb_input_queue, skb);
+#elif defined(HNAT_IPI_RXQUEUE)
+				phnat_ipi->rx_queue[phnat_ipi->rx_queue_widx] = skb;
+				phnat_ipi->rx_queue_widx = (phnat_ipi->rx_queue_widx + 1) % 1024;
+				atomic_add(1, &phnat_ipi->rx_queue_num);
+#else
+				skb_queue_tail(&phnat_ipi->skb_ipi_queue, skb);
+#endif
+#if (defined(HNAT_IPI_DQ) || defined(HNAT_IPI_RXQUEUE))
+				spin_unlock(&phnat_ipi->ipilock);
+#endif
+			}
+		}
+		return 0;
+	} else {
+DISABLE_PPEHIT_IPI:
+		return hitbind_force_to_cpu_handler(skb, entry);
+	}
+}
+
+void ipi_monitor_from_extif(unsigned long data)
+{
+	int i;
+	unsigned long delta;
+	unsigned long cur_time;
+
+	if (hnat_ipi_config->enable_from_extif == 1) {
+		hnat_ipi_s *phnat_ipi;
+		hnat_ipi_stat *phnat_ipi_status;
+
+		cur_time = jiffies;
+
+		for (i = 0; i < num_possible_cpus(); i++) {
+			phnat_ipi = hnat_ipi_from_extif[i];
+			phnat_ipi_status = hnat_ipi_status[i];
+#if defined(HNAT_IPI_DQ)
+			if (((skb_queue_len(&phnat_ipi->skb_input_queue) > 0) ||
+			     (skb_queue_len(&phnat_ipi->skb_process_queue) > 0)) &&
+				(atomic_read(&phnat_ipi_status->cpu_status_from_extif) <= 0)) {
+#elif defined(HNAT_IPI_RXQUEUE)
+			spin_lock(&phnat_ipi->ipilock);
+			if ((atomic_read(&phnat_ipi->rx_queue_num) > 0) &&
+			    (atomic_read(&phnat_ipi_status->cpu_status_from_extif) <= 0)) {
+#else
+			if ((skb_queue_len(&phnat_ipi->skb_ipi_queue) > 0) &&
+			    (atomic_read(&phnat_ipi_status->cpu_status_from_extif) <= 0)) {
+#endif
+				delta = cur_time - phnat_ipi->time_rec;
+				if (delta > 1) {
+					smp_call_function_single(i, smp_func_call_from_extif, NULL, 0);
+					phnat_ipi->time_rec = jiffies;
+				}
+			}
+#ifdef HNAT_IPI_RXQUEUE
+			spin_unlock(&phnat_ipi->ipilock);
+#endif
+		}
+		mod_timer(&ipi_monitor_timer_from_extif, jiffies + 1);
+	}
+}
+
+void ipi_monitor_from_ppehit(unsigned long data)
+{
+	int i;
+	unsigned long delta;
+	unsigned long cur_time;
+
+	if (hnat_ipi_config->enable_from_ppehit == 1) {
+		hnat_ipi_s *phnat_ipi;
+		hnat_ipi_stat *phnat_ipi_status;
+
+		cur_time = jiffies;
+		for (i = 0; i < num_possible_cpus(); i++) {
+			phnat_ipi = hnat_ipi_from_ppehit[i];
+			phnat_ipi_status = hnat_ipi_status[i];
+#if defined(HNAT_IPI_DQ)
+			if (((skb_queue_len(&phnat_ipi->skb_input_queue) > 0) ||
+			     (skb_queue_len(&phnat_ipi->skb_process_queue) > 0)) &&
+				(atomic_read(&phnat_ipi_status->cpu_status_from_ppehit) <= 0)) {
+#elif defined(HNAT_IPI_RXQUEUE)
+			spin_lock(&phnat_ipi->ipilock);
+			if ((atomic_read(&phnat_ipi->rx_queue_num) > 0) &&
+			    (atomic_read(&phnat_ipi_status->cpu_status_from_ppehit) <= 0)) {
+#else
+			if ((skb_queue_len(&phnat_ipi->skb_ipi_queue) > 0) &&
+			    (atomic_read(&phnat_ipi_status->cpu_status_from_ppehit) <= 0)) {
+#endif
+				delta = cur_time - phnat_ipi->time_rec;
+				if (delta > 1) {
+					smp_call_function_single(i, smp_func_call_from_ppehit, NULL, 0);
+					phnat_ipi->time_rec = jiffies;
+				}
+			}
+#ifdef HNAT_IPI_RXQUEUE
+			spin_unlock(&phnat_ipi->ipilock);
+#endif
+		}
+		mod_timer(&ipi_monitor_timer_from_ppehit, jiffies + 1);
+	}
+}
+
+int hnat_ipi_init(void)
+{
+	int i;
+	pr_info("========= %s: init HNAT IPI [%d CPUs](%lu) =========\n\n", __func__,
+		num_possible_cpus(), sizeof(hnat_ipi_s));
+
+	hnat_ipi_from_extif[0] = kzalloc((sizeof(hnat_ipi_s) * 2 + sizeof(hnat_ipi_stat)) * num_possible_cpus() +
+					 sizeof(hnat_ipi_cfg), GFP_ATOMIC);
+	if (!hnat_ipi_from_extif[0]) {
+		pr_info("HNAT IPI allocation failed\n");
+		return -1;
+	}
+	hnat_ipi_from_ppehit[0] = hnat_ipi_from_extif[0] + num_possible_cpus();
+	hnat_ipi_status[0] = (hnat_ipi_stat *)(hnat_ipi_from_ppehit[0] + num_possible_cpus());
+	hnat_ipi_config = (hnat_ipi_cfg *)(hnat_ipi_status[0] + num_possible_cpus());
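+	/* Layout of the single allocation above (N = num_possible_cpus()):
+	 *   [hnat_ipi_s from_extif x N][hnat_ipi_s from_ppehit x N]
+	 *   [hnat_ipi_stat x N][hnat_ipi_cfg]
+	 * One kfree() of hnat_ipi_from_extif[0] in hnat_ipi_de_init()
+	 * releases the whole block.
+	 */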
+	memset(hnat_ipi_config, 0, sizeof(hnat_ipi_cfg));
+	for (i = 0; i < num_possible_cpus(); i++) {
+		hnat_ipi_from_extif[i] = hnat_ipi_from_extif[0] + i;
+		hnat_ipi_from_ppehit[i] = hnat_ipi_from_ppehit[0] + i;
+		hnat_ipi_status[i] = hnat_ipi_status[0] + i;
+
+#if (defined(HNAT_IPI_RXQUEUE) || defined(HNAT_IPI_DQ))
+		spin_lock_init(&hnat_ipi_from_extif[i]->ipilock);
+		spin_lock_init(&hnat_ipi_from_ppehit[i]->ipilock);
+#endif
+#if defined(HNAT_IPI_RXQUEUE)
+		/* ring of 1024 skb pointers; indices wrap modulo 1024 */
+		hnat_ipi_from_extif[i]->rx_queue =
+			kmalloc_array(1024, sizeof(*hnat_ipi_from_extif[i]->rx_queue), GFP_KERNEL);
+		atomic_set(&hnat_ipi_from_extif[i]->rx_queue_num, 0);
+		hnat_ipi_from_extif[i]->rx_queue_widx = 0;
+		hnat_ipi_from_extif[i]->rx_queue_ridx = 0;
+
+		hnat_ipi_from_ppehit[i]->rx_queue =
+			kmalloc_array(1024, sizeof(*hnat_ipi_from_ppehit[i]->rx_queue), GFP_KERNEL);
+		atomic_set(&hnat_ipi_from_ppehit[i]->rx_queue_num, 0);
+		hnat_ipi_from_ppehit[i]->rx_queue_widx = 0;
+		hnat_ipi_from_ppehit[i]->rx_queue_ridx = 0;
+
+#elif defined(HNAT_IPI_DQ)
+		skb_queue_head_init(&hnat_ipi_from_extif[i]->skb_input_queue);
+		skb_queue_head_init(&hnat_ipi_from_extif[i]->skb_process_queue);
+
+		skb_queue_head_init(&hnat_ipi_from_ppehit[i]->skb_input_queue);
+		skb_queue_head_init(&hnat_ipi_from_ppehit[i]->skb_process_queue);
+#else
+		skb_queue_head_init(&hnat_ipi_from_extif[i]->skb_ipi_queue);
+		skb_queue_head_init(&hnat_ipi_from_ppehit[i]->skb_ipi_queue);
+#endif
+		atomic_set(&hnat_ipi_status[i]->cpu_status_from_extif, 0);
+		hnat_ipi_status[i]->drop_pkt_num_from_extif = 0;
+		hnat_ipi_status[i]->smp_call_cnt_from_extif = 0;
+		tasklet_init(&hnat_ipi_from_extif[i]->smp_func_call_tsk, smp_func_call_BH_handler_from_extif, 0);
+
+		atomic_set(&hnat_ipi_status[i]->cpu_status_from_ppehit, 0);
+		hnat_ipi_status[i]->drop_pkt_num_from_ppehit = 0;
+		hnat_ipi_status[i]->smp_call_cnt_from_ppehit = 0;
+		tasklet_init(&hnat_ipi_from_ppehit[i]->smp_func_call_tsk, smp_func_call_BH_handler_from_ppehit, 0);
+	}
+
+	memset(ipidbg, 0, sizeof(ipidbg));
+	memset(ipidbg2, 0, sizeof(ipidbg2));
+
+	ipi_monitor_timer_from_extif.function = NULL;
+	ipi_monitor_timer_from_ppehit.function = NULL;
+	pr_info("========= %s(%d): init HNAT IPI =========\n\n", __func__, __LINE__);
+	return 0;
+}
+
+int hnat_ipi_de_init(void)
+{
+	int i, j;
+	struct sk_buff *skb_deq = NULL;
+	unsigned int current_ipi_enable_from_extif = hnat_ipi_config->enable_from_extif;
+	unsigned int current_ipi_enable_from_ppehit = hnat_ipi_config->enable_from_ppehit;
+	struct hnat_ipi_s *phnat_ipi_from_extif;
+	struct hnat_ipi_s *phnat_ipi_from_ppehit;
+	struct hnat_ipi_stat *phnat_ipi_status;
+
+	hnat_ipi_config->enable_from_extif = 0;
+	hnat_ipi_config->enable_from_ppehit = 0;
+	if (ipi_monitor_timer_from_extif.function)
+		del_timer_sync(&ipi_monitor_timer_from_extif);
+	if (ipi_monitor_timer_from_ppehit.function)
+		del_timer_sync(&ipi_monitor_timer_from_ppehit);
+
+	for (i = 0; i < num_possible_cpus(); i++) {
+#if !defined(HNAT_IPI_DQ)
+		int qlen;
+#endif
+		phnat_ipi_from_extif = hnat_ipi_from_extif[i];
+		phnat_ipi_from_ppehit = hnat_ipi_from_ppehit[i];
+		phnat_ipi_status = hnat_ipi_status[i];
+
+		if (current_ipi_enable_from_extif == 1) {
+			while (1) {
+				if (atomic_read(&phnat_ipi_status->cpu_status_from_extif) >= 1)
+					break;
+			}
+		}
+
+		if (current_ipi_enable_from_ppehit) {
+			while (1) {
+				if (atomic_read(&phnat_ipi_status->cpu_status_from_ppehit) >= 1)
+					break;
+			}
+		}
+
+		if (current_ipi_enable_from_extif == 1)
+			tasklet_kill(&phnat_ipi_from_extif->smp_func_call_tsk);
+		if (current_ipi_enable_from_ppehit == 1)
+			tasklet_kill(&phnat_ipi_from_ppehit->smp_func_call_tsk);
+
+#if defined(HNAT_IPI_DQ)
+		for (j = 0; j < phnat_ipi_from_extif->skb_input_queue.qlen; j++) {
+			skb_deq = skb_dequeue(&phnat_ipi_from_extif->skb_input_queue);
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+		}
+
+		for (j = 0; j < phnat_ipi_from_ppehit->skb_input_queue.qlen; j++) {
+			skb_deq = skb_dequeue(&phnat_ipi_from_ppehit->skb_input_queue);
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+			else
+				break;
+		}
+		for (j = 0; j < phnat_ipi_from_extif->skb_process_queue.qlen; j++) {
+			skb_deq = skb_dequeue(&phnat_ipi_from_extif->skb_process_queue);
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+		}
+		for (j = 0; j < phnat_ipi_from_ppehit->skb_process_queue.qlen; j++) {
+			skb_deq = skb_dequeue(&phnat_ipi_from_ppehit->skb_process_queue);
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+		}
+#elif defined(HNAT_IPI_RXQUEUE)
+		qlen = atomic_read(&phnat_ipi_from_extif->rx_queue_num);
+		for (j = 0; j < qlen; j++) {
+			skb_deq = phnat_ipi_from_extif->rx_queue[phnat_ipi_from_extif->rx_queue_ridx];
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+			phnat_ipi_from_extif->rx_queue[phnat_ipi_from_extif->rx_queue_ridx] = NULL;
+			phnat_ipi_from_extif->rx_queue_ridx = (phnat_ipi_from_extif->rx_queue_ridx + 1) % 1024;
+		}
+		qlen = atomic_read(&phnat_ipi_from_ppehit->rx_queue_num);
+
+		for (j = 0; j < qlen; j++) {
+			skb_deq = phnat_ipi_from_ppehit->rx_queue[phnat_ipi_from_ppehit->rx_queue_ridx];
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+			phnat_ipi_from_ppehit->rx_queue[phnat_ipi_from_ppehit->rx_queue_ridx] = NULL;
+			phnat_ipi_from_ppehit->rx_queue_ridx = (phnat_ipi_from_ppehit->rx_queue_ridx + 1) % 1024;
+		}
+		kfree(phnat_ipi_from_extif->rx_queue);
+		kfree(phnat_ipi_from_ppehit->rx_queue);
+#else
+		qlen = skb_queue_len(&phnat_ipi_from_extif->skb_ipi_queue);
+		for (j = 0; j < qlen; j++) {
+			skb_deq = skb_dequeue(&phnat_ipi_from_extif->skb_ipi_queue);
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+			else
+				break;
+		}
+		qlen = skb_queue_len(&phnat_ipi_from_ppehit->skb_ipi_queue);
+		for (j = 0; j < qlen; j++) {
+			skb_deq = skb_dequeue(&phnat_ipi_from_ppehit->skb_ipi_queue);
+			if (skb_deq)
+				dev_kfree_skb_any(skb_deq);
+			else
+				break;
+		}
+#endif
+	}
+
+	/* everything was carved from the single block allocated in hnat_ipi_init() */
+	kfree(hnat_ipi_from_extif[0]);
+
+	ipi_monitor_timer_from_extif.function = NULL;
+	ipi_monitor_timer_from_ppehit.function = NULL;
+
+	return 0;
+}
+
+int hnat_ipi_timer_setup(void)
+{
+	if ((hnat_ipi_config->enable_from_extif == 1) &&
+	    (!ipi_monitor_timer_from_extif.function)) {
+		init_timer(&ipi_monitor_timer_from_extif);
+		ipi_monitor_timer_from_extif.function = ipi_monitor_from_extif;
+		ipi_monitor_timer_from_extif.expires = jiffies + 1;
+		add_timer(&ipi_monitor_timer_from_extif);
+	}
+	if ((hnat_ipi_config->enable_from_ppehit == 1) &&
+	    (!ipi_monitor_timer_from_ppehit.function)) {
+		init_timer(&ipi_monitor_timer_from_ppehit);
+		ipi_monitor_timer_from_ppehit.function = ipi_monitor_from_ppehit;
+		ipi_monitor_timer_from_ppehit.expires = jiffies + 1;
+		add_timer(&ipi_monitor_timer_from_ppehit);
+		return 0;
+	}
+	return 0;
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/mcast_tbl.c b/src/kernel/modules/netsys_driver/nat/hw_nat/mcast_tbl.c
new file mode 100644
index 0000000..2799b9c
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/mcast_tbl.c
@@ -0,0 +1,441 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include "frame_engine.h"
+#include "mcast_tbl.h"
+#include "util.h"
+#include "hnat_config.h"
+#include "hnat_define.h"
+
+int32_t mcast_entry_get(u16 vlan_id, uint8_t *dst_mac)
+{
+	int i;
+
+	for (i = 0; i < MAX_MCAST_ENTRY; i++) {
+		if ((GET_PPE_MCAST_H(i)->mc_vid == vlan_id) &&
+		    GET_PPE_MCAST_L(i)->mc_mac_addr[3] == dst_mac[2] &&
+		    GET_PPE_MCAST_L(i)->mc_mac_addr[2] == dst_mac[3] &&
+		    GET_PPE_MCAST_L(i)->mc_mac_addr[1] == dst_mac[4] &&
+		    GET_PPE_MCAST_L(i)->mc_mac_addr[0] == dst_mac[5]) {
+			if (GET_PPE_MCAST_H(i)->mc_mpre_sel == 0) {
+				if (dst_mac[0] == 0x1 && dst_mac[1] == 0x00)
+					return i;
+			} else if (GET_PPE_MCAST_H(i)->mc_mpre_sel == 1) {
+				if (dst_mac[0] == 0x33 && dst_mac[1] == 0x33)
+					return i;
+			} else {
+				continue;
+			}
+		}
+	}
+	if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+		for (i = 0; i < MAX_MCAST_ENTRY16_63; i++) {
+			if ((GET_PPE_MCAST_H10(i)->mc_vid == vlan_id) &&
+			    GET_PPE_MCAST_L10(i)->mc_mac_addr[3] == dst_mac[2] &&
+			    GET_PPE_MCAST_L10(i)->mc_mac_addr[2] == dst_mac[3] &&
+			    GET_PPE_MCAST_L10(i)->mc_mac_addr[1] == dst_mac[4] &&
+			    GET_PPE_MCAST_L10(i)->mc_mac_addr[0] == dst_mac[5]) {
+				if (GET_PPE_MCAST_H10(i)->mc_mpre_sel == 0) {
+					if (dst_mac[0] == 0x1 && dst_mac[1] == 0x00)
+						return (i + 16);
+				} else if (GET_PPE_MCAST_H10(i)->mc_mpre_sel == 1) {
+					if (dst_mac[0] == 0x33 && dst_mac[1] == 0x33)
+						return (i + 16);
+				} else {
+					continue;
+				}
+			}
+		}
+	}
+	return -1;
+}
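+
+/* Note: the PPE stores only the low four MAC bytes, byte-reversed
+ * (mc_mac_addr[0] holds dst_mac[5]); the two-byte multicast prefix is
+ * encoded separately in mc_mpre_sel (0 = 01:00, 1 = 33:33), which is why
+ * the lookups above compare bytes 2..5 and the prefix independently.
+ */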
+
+/*  mc_px_en: enable multicast to port x*/
+/*  mc_px_qos_en: enable QoS for multicast to port x*/
+/*  - multicast port0 map to PDMA*/
+/*  - multicast port1 map to GMAC1*/
+/*  - multicast port2 map to GMAC2*/
+/*  - multicast port3 map to QDMA*/
+
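+/* Illustrative sketch (not part of the driver): with the port map above,
+ * flooding a hypothetical IPv4 group MAC on VID 1 to GMAC1 and QDMA means
+ * setting bits 1 and 3 of mc_px_en:
+ *
+ *	u8 grp_mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
+ *
+ *	foe_mcast_entry_ins(1, grp_mac, 0x0a, 0, 0);
+ *
+ * The 01:00 prefix makes foe_mcast_entry_ins() pick mc_mpre_sel = 0.
+ */
+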
+int foe_mcast_entry_ins(u16 vlan_id, uint8_t *dst_mac, uint8_t mc_px_en,
+			u8 mc_px_qos_en, uint8_t mc_qos_qid)
+{
+	int i = 0;
+	int entry_num;
+	struct ppe_mcast_h *mcast_h;
+	struct ppe_mcast_l *mcast_l;
+
+	pr_info("%s: vid=%x mac=%x:%x:%x:%x:%x:%x mc_px_en=%x mc_px_qos_en=%x, mc_qos_qid=%d\n",
+		__func__, vlan_id, dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], dst_mac[4],
+		dst_mac[5], mc_px_en, mc_px_qos_en, mc_qos_qid);
+	entry_num = mcast_entry_get(vlan_id, dst_mac);
+	/* update existing entry */
+	if (entry_num >= 0) {
+		pr_info("update existing entry %d\n", entry_num);
+		if (entry_num < 16) {
+			mcast_h = GET_PPE_MCAST_H(entry_num);
+			mcast_l = GET_PPE_MCAST_L(entry_num);
+
+			if (dst_mac[0] == 0x1 && dst_mac[1] == 0x00)
+				mcast_h->mc_mpre_sel = 0;
+			else if (dst_mac[0] == 0x33 && dst_mac[1] == 0x33)
+				mcast_h->mc_mpre_sel = 1;
+			else
+				return 0;
+
+			mcast_h->mc_px_en = mc_px_en;
+			mcast_h->mc_px_qos_en = mc_px_qos_en;
+			/* queues 16-63 need the extra qid[5:4] bits (mt7622/leopard) */
+			if (mc_qos_qid < 16) {
+				mcast_h->mc_qos_qid = mc_qos_qid;
+			} else if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+				mcast_h->mc_qos_qid = mc_qos_qid & 0xf;
+				mcast_h->mc_qos_qid54 = (mc_qos_qid & 0x30) >> 4;
+			}
+			return 1;
+		}
+		if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+			if (entry_num >= 16) {
+				mcast_h = GET_PPE_MCAST_H10(entry_num - 16);
+				mcast_l = GET_PPE_MCAST_L10(entry_num - 16);
+
+				if (dst_mac[0] == 0x1 && dst_mac[1] == 0x00)
+					mcast_h->mc_mpre_sel = 0;
+				else if (dst_mac[0] == 0x33 && dst_mac[1] == 0x33)
+					mcast_h->mc_mpre_sel = 1;
+				else
+					return 0;
+
+				mcast_h->mc_px_en = mc_px_en;
+				mcast_h->mc_px_qos_en = mc_px_qos_en;
+				if (mc_qos_qid < 16) {
+					mcast_h->mc_qos_qid = mc_qos_qid;
+				} else {
+					mcast_h->mc_qos_qid = mc_qos_qid & 0xf;
+					mcast_h->mc_qos_qid54 = (mc_qos_qid & 0x30) >> 4;
+				}
+			}
+			return 1;
+		}
+	} else { /* create new entry */
+
+		for (i = 0; i < MAX_MCAST_ENTRY; i++) {
+			mcast_h = GET_PPE_MCAST_H(i);
+			mcast_l = GET_PPE_MCAST_L(i);
+			if (mcast_h->valid == 0) {
+				if (dst_mac[0] == 0x1 && dst_mac[1] == 0x00)
+					mcast_h->mc_mpre_sel = 0;
+				else if (dst_mac[0] == 0x33 && dst_mac[1] == 0x33)
+					mcast_h->mc_mpre_sel = 1;
+				else
+					return 0;
+
+				mcast_h->mc_vid = vlan_id;
+				mcast_h->mc_px_en = mc_px_en;
+				mcast_h->mc_px_qos_en = mc_px_qos_en;
+				mcast_l->mc_mac_addr[3] = dst_mac[2];
+				mcast_l->mc_mac_addr[2] = dst_mac[3];
+				mcast_l->mc_mac_addr[1] = dst_mac[4];
+				mcast_l->mc_mac_addr[0] = dst_mac[5];
+				mcast_h->valid = 1;
+				if (mc_qos_qid < 16) {
+					mcast_h->mc_qos_qid = mc_qos_qid;
+				} else if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+					mcast_h->mc_qos_qid = mc_qos_qid & 0xf;
+					mcast_h->mc_qos_qid54 = (mc_qos_qid & 0x30) >> 4;
+				}
+				return 1;
+			}
+		}
+
+		if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+			for (i = 0; i < MAX_MCAST_ENTRY16_63; i++) {
+				mcast_h = GET_PPE_MCAST_H10(i);
+				mcast_l = GET_PPE_MCAST_L10(i);
+
+				if (mcast_h->valid == 0) {
+					if (dst_mac[0] == 0x1 && dst_mac[1] == 0x00)
+						mcast_h->mc_mpre_sel = 0;
+					else if (dst_mac[0] == 0x33 && dst_mac[1] == 0x33)
+						mcast_h->mc_mpre_sel = 1;
+					else
+						return 0;
+
+					mcast_h->mc_vid = vlan_id;
+					mcast_h->mc_px_en = mc_px_en;
+					mcast_h->mc_px_qos_en = mc_px_qos_en;
+					mcast_l->mc_mac_addr[3] = dst_mac[2];
+					mcast_l->mc_mac_addr[2] = dst_mac[3];
+					mcast_l->mc_mac_addr[1] = dst_mac[4];
+					mcast_l->mc_mac_addr[0] = dst_mac[5];
+					mcast_h->valid = 1;
+					if (mc_qos_qid < 16) {
+						mcast_h->mc_qos_qid = mc_qos_qid;
+					} else {
+						mcast_h->mc_qos_qid = mc_qos_qid & 0xf;
+						mcast_h->mc_qos_qid54 = (mc_qos_qid & 0x30) >> 4;
+					}
+					return 1;
+				}
+			}
+		}
+	}
+	MCAST_PRINT("HNAT: Multicast Table is FULL!!\n");
+	return 0;
+}
+
+int foe_mcast_entry_qid(u16 vlan_id, uint8_t *dst_mac, uint8_t mc_qos_qid)
+{
+	int entry_num;
+	struct ppe_mcast_h *mcast_h;
+
+	if (debug_level >= 1)
+		pr_info("%s: vid=%x mac=%x:%x:%x:%x:%x:%x mc_qos_qid=%d\n",
+			__func__, vlan_id, dst_mac[0], dst_mac[1], dst_mac[2],
+			dst_mac[3], dst_mac[4], dst_mac[5], mc_qos_qid);
+
+	entry_num = mcast_entry_get(vlan_id, dst_mac);
+	/* update existing entry */
+	if (entry_num >= 0) {
+		if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+			if (entry_num <= 15)
+				mcast_h = GET_PPE_MCAST_H(entry_num);
+			else
+				mcast_h = GET_PPE_MCAST_H10(entry_num - 16);
+			/* mc_qos_qid/mc_qos_qid54 are left untouched here; they
+			 * are programmed by foe_mcast_entry_ins() on these chips.
+			 */
+		} else {
+			mcast_h = GET_PPE_MCAST_H(entry_num);
+			mcast_h->mc_qos_qid = mc_qos_qid;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Return:*/
+/*	    0: entry found*/
+/*	    1: entry not found*/
+int foe_mcast_entry_del(u16 vlan_id, uint8_t *dst_mac, uint8_t mc_px_en,
+			u8 mc_px_qos_en, uint8_t mc_qos_qid)
+{
+	int entry_num;
+	struct ppe_mcast_h *mcast_h;
+	struct ppe_mcast_l *mcast_l;
+
+	pr_info("%s: vid=%x mac=%x:%x:%x:%x:%x:%x mc_px_en=%x mc_px_qos_en=%x mc_qos_qid=%d\n",
+		__func__, vlan_id, dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], dst_mac[4],
+		dst_mac[5], mc_px_en, mc_px_qos_en, mc_qos_qid);
+	entry_num = mcast_entry_get(vlan_id, dst_mac);
+	if (entry_num >= 0) {
+		pr_info("entry_num = %d\n", entry_num);
+		if (entry_num <= 15) {
+			mcast_h = GET_PPE_MCAST_H(entry_num);
+			mcast_l = GET_PPE_MCAST_L(entry_num);
+			mcast_h->mc_px_en &= ~mc_px_en;
+			mcast_h->mc_px_qos_en &= ~mc_px_qos_en;
+			if (mcast_h->mc_px_en == 0 && mcast_h->mc_px_qos_en == 0) {
+				mcast_h->valid = 0;
+				mcast_h->mc_vid = 0;
+				mcast_h->mc_qos_qid = 0;
+				if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT))
+					mcast_h->mc_qos_qid54 = 0;
+				memset(&mcast_l->mc_mac_addr, 0, 4);
+			}
+		}
+		if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+			if (entry_num > 15) {
+				mcast_h = GET_PPE_MCAST_H10(entry_num - 16);
+				mcast_l = GET_PPE_MCAST_L10(entry_num - 16);
+				mcast_h->mc_px_en &= ~mc_px_en;
+				mcast_h->mc_px_qos_en &= ~mc_px_qos_en;
+				if (mcast_h->mc_px_en == 0 && mcast_h->mc_px_qos_en == 0) {
+					mcast_h->valid = 0;
+					mcast_h->mc_vid = 0;
+					mcast_h->mc_qos_qid = 0;
+					mcast_h->mc_qos_qid54 = 0;
+					memset(&mcast_l->mc_mac_addr, 0, 4);
+				}
+			}
+		}
+	} else {
+		pr_info("%s fail: entry_number = %d\n", __func__, entry_num);
+		return 1;
+	}
+	return 0;
+}
+
+void foe_mcast_entry_dump(void)
+{
+	int i;
+	struct ppe_mcast_h *mcast_h;
+	struct ppe_mcast_l *mcast_l;
+
+	pr_info("MAC | VID | PortMask | QosPortMask\n");
+	for (i = 0; i < MAX_MCAST_ENTRY; i++) {
+		mcast_h = GET_PPE_MCAST_H(i);
+		mcast_l = GET_PPE_MCAST_L(i);
+		if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+			pr_info("%x:%x:%x:%x  %d  %c%c%c%c %c%c%c%c (QID=%d, mc_mpre_sel=%d)\n",
+				mcast_l->mc_mac_addr[3],
+				mcast_l->mc_mac_addr[2],
+				mcast_l->mc_mac_addr[1],
+				mcast_l->mc_mac_addr[0],
+				mcast_h->mc_vid,
+				(mcast_h->mc_px_en & 0x08) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x04) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x02) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x01) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x08) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x04) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x02) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x01) ? '1' : '-',
+				mcast_h->mc_qos_qid + ((mcast_h->mc_qos_qid54) << 4),
+				mcast_h->mc_mpre_sel);
+		} else {
+			pr_info("%x:%x:%x:%x  %d  %c%c%c%c %c%c%c%c (QID=%d, mc_mpre_sel=%d)\n",
+				mcast_l->mc_mac_addr[3],
+				mcast_l->mc_mac_addr[2],
+				mcast_l->mc_mac_addr[1],
+				mcast_l->mc_mac_addr[0],
+				mcast_h->mc_vid,
+				(mcast_h->mc_px_en & 0x08) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x04) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x02) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x01) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x08) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x04) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x02) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x01) ? '1' : '-',
+				mcast_h->mc_qos_qid,
+				mcast_h->mc_mpre_sel);
+		}
+	}
+	if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+		for (i = 0; i < MAX_MCAST_ENTRY16_63; i++) {
+			mcast_h = GET_PPE_MCAST_H10(i);
+			mcast_l = GET_PPE_MCAST_L10(i);
+
+			pr_info("%x:%x:%x:%x  %d  %c%c%c%c %c%c%c%c (QID=%d, mc_mpre_sel=%d)\n",
+				mcast_l->mc_mac_addr[3],
+				mcast_l->mc_mac_addr[2],
+				mcast_l->mc_mac_addr[1],
+				mcast_l->mc_mac_addr[0],
+				mcast_h->mc_vid,
+				(mcast_h->mc_px_en & 0x08) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x04) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x02) ? '1' : '-',
+				(mcast_h->mc_px_en & 0x01) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x08) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x04) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x02) ? '1' : '-',
+				(mcast_h->mc_px_qos_en & 0x01) ? '1' : '-',
+				mcast_h->mc_qos_qid + ((mcast_h->mc_qos_qid54) << 4),
+				mcast_h->mc_mpre_sel);
+		}
+	}
+}
+
+void foe_mcast_entry_del_all(void)
+{
+	int i;
+	struct ppe_mcast_h *mcast_h;
+	struct ppe_mcast_l *mcast_l;
+
+	for (i = 0; i < MAX_MCAST_ENTRY; i++) {
+		mcast_h = GET_PPE_MCAST_H(i);
+		mcast_l = GET_PPE_MCAST_L(i);
+		mcast_h->mc_px_en = 0;
+		mcast_h->mc_px_qos_en = 0;
+		mcast_h->valid = 0;
+		mcast_h->mc_vid = 0;
+		mcast_h->mc_qos_qid = 0;
+		mcast_h->mc_mpre_sel = 0;
+		memset(&mcast_l->mc_mac_addr, 0, 4);
+	}
+	if (hnat_chip_name & (MT7622_HWNAT | LEOPARD_HWNAT)) {
+		for (i = 0; i < MAX_MCAST_ENTRY16_63; i++) {
+			mcast_h = GET_PPE_MCAST_H10(i);
+			mcast_l = GET_PPE_MCAST_L10(i);
+			mcast_h->mc_px_en = 0;
+			mcast_h->mc_px_qos_en = 0;
+			mcast_h->valid = 0;
+			mcast_h->mc_vid = 0;
+			mcast_h->mc_qos_qid = 0;
+			mcast_h->mc_mpre_sel = 0;
+			memset(&mcast_l->mc_mac_addr, 0, 4);
+		}
+	}
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/mcast_tbl.h b/src/kernel/modules/netsys_driver/nat/hw_nat/mcast_tbl.h
new file mode 100644
index 0000000..d71348d
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/mcast_tbl.h
@@ -0,0 +1,99 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef _MCAST_TBL_WANTED
+#define _MCAST_TBL_WANTED
+
+struct ppe_mcast_h {
+	uint32_t	mc_vid:12;
+	uint32_t	mc_qos_qid54:2; /* mt7622 only */
+	uint32_t	valid:1;
+	uint32_t	rev1:1;
+	uint32_t	mc_px_en:4;
+	uint32_t	mc_mpre_sel:2; /* MAC prefix: 0=01:00, 1=33:33 */
+	uint32_t	mc_vid_cmp:1;
+	uint32_t	rev2:1;
+	uint32_t	mc_px_qos_en:4;
+	uint32_t	mc_qos_qid:4;
+};
+
+struct ppe_mcast_l {
+	u8	mc_mac_addr[4]; /* mc_mac_addr[31:0] */
+};
+
+/* DEFINITIONS AND MACROS*/
+#define MAX_MCAST_ENTRY	    16
+#define MAX_MCAST_ENTRY16_63    48
+#define MAX_MCAST_ENTRY_TOTOAL  64
+/* #define MCAST_DEBUG */
+#ifdef MCAST_DEBUG
+#define MCAST_PRINT(fmt, args...) pr_info(fmt, ## args)
+#else
+#define MCAST_PRINT(fmt, args...) do { } while (0)
+#endif
+
+#define GET_PPE_MCAST_H(idx)		((struct ppe_mcast_h *)(PPE_MCAST_H_0 + ((idx) * 8)))
+#define GET_PPE_MCAST_L(idx)		((struct ppe_mcast_l *)(PPE_MCAST_L_0 + ((idx) * 8)))
+
+#define GET_PPE_MCAST_H10(idx)		((struct ppe_mcast_h *)(PPE_MCAST_H_10 + ((idx) * 8)))
+#define GET_PPE_MCAST_L10(idx)		((struct ppe_mcast_l *)(PPE_MCAST_L_10 + ((idx) * 8)))
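+
+/* Each table entry is an 8-byte pair (one ppe_mcast_h word followed by one
+ * ppe_mcast_l word), hence the (idx) * 8 stride. Entries 0-15 sit at
+ * PPE_MCAST_H_0/PPE_MCAST_L_0; on MT7622/Leopard a second bank at
+ * PPE_MCAST_H_10/PPE_MCAST_L_10 holds entries 16-63.
+ */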
+
+/* EXPORT FUNCTION*/
+int foe_mcast_entry_ins(u16 vlan_id, u8 *dst_mac,
+			u8 mc_px_en, u8 mc_px_qos_en, u8 mc_qos_qid);
+int foe_mcast_entry_qid(u16 vlan_id, u8 *dst_mac, u8 mc_qos_qid);
+int foe_mcast_entry_del(u16 vlan_id, u8 *dst_mac, u8 mc_px_en,
+			u8 mc_px_qos_en, u8 mc_qos_qid);
+void foe_mcast_entry_dump(void);
+void foe_mcast_entry_del_all(void);
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat.c b/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat.c
new file mode 100755
index 0000000..5bc7a52
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat.c
@@ -0,0 +1,706 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <net/rtnetlink.h>
+#include <net/netevent.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include "ra_nat.h"
+#include "foe_fdb.h"
+#include "frame_engine.h"
+#include "util.h"
+#include "hnat_ioctl.h"
+#include "hnat_define.h"
+#include "hnat_config.h"
+#include "hnat_dbg_proc.h"
+#include "hnat_common.h"
+
+struct timer_list hwnat_clear_entry_timer;
+//struct timer_list hnat_reset_timestamp_timer;
+unsigned int hnat_chip_name;
+EXPORT_SYMBOL(hnat_chip_name);
+unsigned int fe_feature;
+EXPORT_SYMBOL(fe_feature);
+void __iomem *fe_base;
+void __iomem *med_base;
+extern u32 rndis_mod;
+extern u32 rndis_bind_count;
+
+void set_rx_if_idx_md(struct sk_buff *skb, u8 channel)
+{
+	FOE_IF_IDX(skb) = channel + 8;
+}
+
+void set_rx_if_idx(struct sk_buff *skb)
+{
+	u8 i, match;
+
+	match = 0;
+	for (i = 0; i < 16; i++) {
+		if (dst_port[i] == skb->dev) {
+			FOE_IF_IDX(skb) = i;
+			match = 1;
+			//if (debug_level >= 7)
+			//	pr_info("%s : Interface=%s, i=%x\n", __func__, skb->dev->name, i);
+			break;
+		}
+	}
+	if (match == 0)
+		FOE_IF_IDX(skb) = 0xf;
+}
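+
+/* FOE_IF_IDX convention (as used above): values 0-15 index the dst_port[]
+ * net_device table, with 0xf used as the fallback when no device matches;
+ * set_rx_if_idx_md() maps modem channels past the table at channel + 8.
+ */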
+#if 0
+static void hwnat_clear_entry(struct timer_list *t)
+{
+	pr_info("HW_NAT work normally\n");
+	reg_modify_bits(PPE_FOE_CFG, FWD_CPU_BUILD_ENTRY, 4, 2);
+	/* del_timer_sync(&hwnat_clear_entry_timer); */
+}
+#endif
+#include "mcast_tbl.h"
+
+/*#include "../../../drivers/net/raeth/ra_ioctl.h"*/
+
+/* #define DSCP_REMARK_TEST */
+/* #define PREBIND_TEST */
+#define DD \
+{\
+pr_info("%s %d\n", __func__, __LINE__); \
+}
+#if 0
+static void hnat_reset_timestamp(unsigned long data)
+{
+	struct foe_entry *entry;
+	int hash_index;
+
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_TCP_AGE, 9, 0);
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_UDP_AGE, 10, 0);
+	reg_write(FOE_TS, 0);
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		entry = &ppe_foe_base[hash_index];
+		if (entry->bfib1.state == BIND)
+			entry->ipv4_hnapt.udib1.time_stamp = 0;
+	}
+
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_TCP_AGE, 9, 1);
+	reg_modify_bits(PPE_FOE_CFG, DFL_FOE_UDP_AGE, 10, 1);
+	mod_timer(&hnat_reset_timestamp_timer, jiffies + 14400 * HZ);
+}
+
+void foe_clear_entry(struct neighbour *neigh)
+{
+	int hash_index, clear;
+	struct foe_entry *entry;
+	u32 *daddr = (u32 *)neigh->primary_key;
+	const u8 *addrtmp;
+	u8 mac0, mac1, mac2, mac3, mac4, mac5;
+	u32 dip;
+
+	dip = (u32)(*daddr);
+	clear = 0;
+	addrtmp = neigh->ha;
+	mac0 = (u8)(*addrtmp);
+	mac1 = (u8)(*(addrtmp + 1));
+	mac2 = (u8)(*(addrtmp + 2));
+	mac3 = (u8)(*(addrtmp + 3));
+	mac4 = (u8)(*(addrtmp + 4));
+	mac5 = (u8)(*(addrtmp + 5));
+
+	for (hash_index = 0; hash_index < FOE_4TB_SIZ; hash_index++) {
+		entry = &ppe_foe_base[hash_index];
+		if (entry->bfib1.state == BIND) {
+			/*pr_info("before old mac= %x:%x:%x:%x:%x:%x, new_dip=%x\n",*/
+			/*	entry->ipv4_hnapt.dmac_hi[3],*/
+			/*	entry->ipv4_hnapt.dmac_hi[2],*/
+			/*	entry->ipv4_hnapt.dmac_hi[1],*/
+			/*	entry->ipv4_hnapt.dmac_hi[0],*/
+			/*	entry->ipv4_hnapt.dmac_lo[1],*/
+			/*	entry->ipv4_hnapt.dmac_lo[0], entry->ipv4_hnapt.new_dip);*/
+			if (entry->ipv4_hnapt.new_dip == ntohl(dip)) {
+				if ((entry->ipv4_hnapt.dmac_hi[3] != mac0) ||
+				    (entry->ipv4_hnapt.dmac_hi[2] != mac1) ||
+				    (entry->ipv4_hnapt.dmac_hi[1] != mac2) ||
+				    (entry->ipv4_hnapt.dmac_hi[0] != mac3) ||
+				    (entry->ipv4_hnapt.dmac_lo[1] != mac4) ||
+				    (entry->ipv4_hnapt.dmac_lo[0] != mac5)) {
+					pr_info("%s: state=%d\n", __func__, neigh->nud_state);
+					reg_modify_bits(PPE_FOE_CFG, ONLY_FWD_CPU, 4, 2);
+
+					entry->ipv4_hnapt.udib1.state = INVALID;
+					entry->ipv4_hnapt.udib1.time_stamp = reg_read(FOE_TS) & 0xFF;
+					ppe_set_cache_ebl();
+					mod_timer(&hwnat_clear_entry_timer, jiffies + 3 * HZ);
+
+					pr_info("delete old entry: dip =%x\n", ntohl(dip));
+
+					pr_info("old mac= %x:%x:%x:%x:%x:%x, dip=%x\n",
+						entry->ipv4_hnapt.dmac_hi[3],
+						entry->ipv4_hnapt.dmac_hi[2],
+						entry->ipv4_hnapt.dmac_hi[1],
+						entry->ipv4_hnapt.dmac_hi[0],
+						entry->ipv4_hnapt.dmac_lo[1],
+						entry->ipv4_hnapt.dmac_lo[0],
+						ntohl(dip));
+					pr_info("new mac= %x:%x:%x:%x:%x:%x, dip=%x\n",
+						mac0, mac1, mac2, mac3, mac4, mac5, ntohl(dip));
+				}
+			}
+		}
+	}
+}
+
+static int wh2_netevent_handler(struct notifier_block *unused,
+				unsigned long event, void *ptr)
+{
+	struct net_device *dev = NULL;
+	struct neighbour *neigh = NULL;
+
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE:
+		neigh = ptr;
+		dev = neigh->dev;
+		if (dev)
+			foe_clear_entry(neigh);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block hnat_netevent_nb __read_mostly = {
+	.notifier_call = wh2_netevent_handler,
+};
+#endif
+#ifdef CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT
+int32_t ppe_tx_modem_handler(struct sk_buff *skb, u32 net_type, u32 channel_id)
+{
+	struct foe_entry *entry;
+	int ret;
+
+	FOE_MINFO_NTYPE(skb) = net_type;
+	FOE_MINFO_CHID(skb) = channel_id;
+
+	ret = check_whitelist(skb);
+
+	if (ret)
+		return 1;
+
+	ret = check_entry_region(skb);
+
+	if (ret)
+		return 1;
+
+	//decide which ppe0/ppe1
+	entry = decide_which_ppe(skb);
+
+	ret = tx_cpu_handler_modem(skb, entry, MDMA_PSE_PORT);
+
+	return ret;
+}
+
+extern void __iomem *medmcu_hnat_info_host_base_virt;
+int32_t ppe_rx_modem_handler(struct sk_buff *skb, u8 drop, u8 channel)
+{
+	int ret;
+	struct MED_HNAT_INFO_HOST *med_dmad;
+	unsigned int foe_entry_num, cpu_reason, wdix, rdix;
+	//void *cache_start;
+
+	rdix = reg_read(MEDHW_SSR1_DST_RB0_RIDX) & 0x3ffff;
+	wdix = reg_read(MEDHW_SSR1_DST_RB0_WIDX) & 0x3ffff;
+#if 0
+	cache_start = medmcu_hnat_info_host_base_virt + (sizeof(struct MED_HNAT_INFO_HOST) * rdix);
+
+	__inval_dcache_area(cache_start, sizeof(struct MED_HNAT_INFO_HOST));
+#endif
+	med_dmad = &med_info_base[rdix];
+
+	if ((rdix == wdix) && (wdix == 0))
+		pr_info("HNAT HOST INFO index error wdix = %x, rdix = %x\n", wdix, rdix);
+
+	foe_entry_num = med_dmad->dmad_info1.PPE_ENTRY;
+	cpu_reason = med_dmad->dmad_info1.CRSN;
+
+	if (debug_level >= 11) {
+		pr_info("MD RX : cpu_reason = %x, foe_entry_num = %x, rdix = %x, wdix = %x, channel= %u\n",
+			cpu_reason, foe_entry_num, rdix, wdix, channel);
+	}
+
+	reg_write(MEDHW_SSR1_DST_RB0_DEC, ((0x1U << 31) | 0x1));
+
+	//if (debug_level >= 6) {
+	//	while (reg_read(MEDHW_SSR1_DST_RB0_DEC) & (0x1 << 31));
+	//	rdix = reg_read(MEDHW_SSR1_DST_RB0_RIDX) & 0x3ffff;
+	//	pr_info("MD RX : new ridx = %x\n", rdix);
+	//}
+
+	if (drop == 1) {
+		if (debug_level >= 6)
+			pr_info("hook drop\n");
+		return 1;
+	}
+
+	if (IS_SPACE_AVAILABLE_HEAD(skb)) {
+		FOE_ENTRY_NUM(skb) = foe_entry_num;
+		FOE_AI(skb) = cpu_reason;
+		FOE_MAGIC_TAG(skb) = FOE_MAGIC_MED;
+	} else {
+		pr_info("header room size not available = %u\n", skb_headroom(skb));
+	}
+
+	foe_format_create(skb);
+	rx_debug_log(skb);
+	FOE_ALG(skb) = 0;
+	set_rx_if_idx_md(skb, channel);
+	ret = rx_cpu_handler_modem(skb);
+
+	return ret;
+}
+#endif
+int32_t ppe_tx_rndis_handler(struct sk_buff *skb)
+{
+	struct foe_entry *entry;
+	int ret;
+
+	if (debug_level >= 10)
+		pr_info("%s, FOE_AI(skb):0x%x, FOE_SP(skb):%d\n", __func__, FOE_AI(skb), FOE_SP(skb));
+	ret = check_whitelist(skb);
+
+	if (ret)
+		return 1;
+
+	ret = check_entry_region(skb);
+
+	if (ret)
+		return 1;
+
+	//decide which ppe0/ppe1
+	entry = decide_which_ppe(skb);
+
+	ret = tx_cpu_handler_rndis(skb, entry, ADMA_PSE_PORT);
+
+	return ret;
+}
+
+int32_t ppe_rx_rndis_handler(struct sk_buff *skb)
+{
+	int ret;
+
+	set_rx_if_idx(skb);
+	FOE_ALG(skb) = 0;
+	ret = rx_cpu_handler_rndis(skb);
+	return ret;
+}
+
+int32_t ppe_tx_wifi_handler(struct sk_buff *skb, int gmac_no)
+{
+	struct foe_entry *entry;
+	int ret;
+
+	if (debug_level >= 10)
+		pr_info("%s, FOE_AI(skb):0x%x, FOE_SP(skb):%d, gmac_no:%d\n", __func__, FOE_AI(skb), FOE_SP(skb), gmac_no);
+	ret = check_whitelist(skb);
+
+	if (ret)
+		return 1;
+
+	ret = check_entry_region(skb);
+
+	if (ret)
+		return 1;
+
+	//decide which ppe0/ppe1
+	entry = decide_which_ppe(skb);
+
+	ret = tx_cpu_handler_wifi(skb, entry, gmac_no);
+
+	if (debug_level >= 10)
+		pr_info("%s end, FOE_AI(skb):0x%x, FOE_SP(skb):%d, gmac_no:%d\n", __func__, FOE_AI(skb), FOE_SP(skb), gmac_no);
+
+	return ret;
+}
+
+int32_t ppe_rx_wifi_handler(struct sk_buff *skb)
+{
+	int ret;
+
+	foe_format_create(skb);
+	rx_debug_log(skb);
+	set_rx_if_idx(skb);
+	FOE_ALG(skb) = 0;
+	ret = rx_cpu_handler_wifi(skb);
+	return ret;
+}
+
+int32_t ppe_tx_eth_handler(struct sk_buff *skb, int gmac_no)
+{
+	struct foe_entry *entry;
+	int ret;
+
+	ret = check_whitelist(skb);
+
+	if (ret)
+		return 1;
+	ret = check_entry_region(skb);
+
+	if (ret)
+		return 1;
+
+	//decide which ppe0/ppe1
+	entry = decide_which_ppe(skb);
+
+	ret = tx_cpu_handler_eth(skb, entry, gmac_no);
+
+	if (debug_level >= 10)
+		pr_notice("%s, end, ret:%d\n", __func__, ret);
+
+	return ret;
+}
+
+int32_t ppe_rx_eth_handler(struct sk_buff *skb)
+{
+	int ret;
+
+	if (debug_level >= 10)
+		pr_info("%s, FOE_AI(skb):0x%x, FOE_SP(skb):%d\n", __func__, FOE_AI(skb), FOE_SP(skb));
+
+	foe_format_create(skb);
+	//FOE_INFO_DUMP(skb);
+	FOE_ALG(skb) = 0;
+	rx_debug_log(skb);
+	set_rx_if_idx(skb);
+	ret = rx_cpu_handler_eth(skb);
+
+	if (debug_level >= 10)
+		pr_info("%s end, ret:%d\n", __func__, ret);
+
+	return ret;
+}
+
+int32_t ppe_tx_ext_handler(struct sk_buff *skb, int gmac_no)
+{
+	struct foe_entry *entry;
+	int ret;
+
+	ret = check_whitelist(skb);
+
+	if (ret)
+		return 1;
+
+	ret = check_entry_region(skb);
+
+	if (ret)
+		return 1;
+
+	//decide which ppe0/ppe1
+	entry = decide_which_ppe(skb);
+
+	ret = tx_cpu_handler_ext(skb, entry, gmac_no);
+
+	return ret;
+}
+
+int32_t ppe_rx_ext_handler(struct sk_buff *skb)
+{
+	int ret;
+
+	if (debug_level >= 10)
+		pr_info("%s, FOE_AI(skb):%x\n", __func__, FOE_AI(skb));
+	set_rx_if_idx(skb);
+	ret = rx_cpu_handler_ext(skb);
+	return ret;
+}
+
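+/*
+ * ppe_modify_hook - attach or detach the PPE RX/TX hook handlers
+ * @clear: true detaches the hook (sets it to NULL), false attaches it
+ * @hook_id: one of HWNAT_HOOK_ID_ETH/WIFI/MODEM/RNDIS/EXT
+ * @dir: HWNAT_DIR_ID_RX, HWNAT_DIR_ID_TX or HWNAT_DIR_ID_ALL
+ */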
+void ppe_modify_hook(bool clear, unsigned char hook_id, int dir)
+{
+	pr_info("%s: clear = %d, hook_id = %d, dir = %d\n", __func__, clear, hook_id, dir);
+
+	if (hook_id == HWNAT_HOOK_ID_ETH) {
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_RX)
+			ppe_hook_rx_eth = clear ? NULL : ppe_rx_eth_handler;
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_TX)
+			ppe_hook_tx_eth = clear ? NULL : ppe_tx_eth_handler;
+
+	} else if (hook_id == HWNAT_HOOK_ID_WIFI) {
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_RX) {
+			ppe_hook_rx_wifi = clear ? NULL : ppe_rx_wifi_handler;
+			ra_sw_nat_hook_rx = clear ? NULL : ppe_rx_wifi_handler;
+		}
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_TX) {
+			ppe_hook_tx_wifi = clear ? NULL : ppe_tx_wifi_handler;
+			ra_sw_nat_hook_tx = clear ? NULL : ppe_tx_wifi_handler;
+		}
+#ifdef CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT
+	} else if (hook_id == HWNAT_HOOK_ID_MODEM) {
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_RX)
+			ppe_hook_rx_modem = clear ? NULL : ppe_rx_modem_handler;
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_TX)
+			ppe_hook_tx_modem = clear ? NULL : ppe_tx_modem_handler;
+
+#endif /* CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT */
+	} else if (hook_id == HWNAT_HOOK_ID_RNDIS) {
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_RX)
+			ppe_hook_rx_rndis = clear ? NULL : ppe_rx_rndis_handler;
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_TX)
+			ppe_hook_tx_rndis = clear ? NULL : ppe_tx_rndis_handler;
+
+	} else if (hook_id == HWNAT_HOOK_ID_EXT) {
+
+		if (dir == HWNAT_DIR_ID_ALL || dir == HWNAT_DIR_ID_RX)
+			ppe_hook_rx_ext = clear ? NULL : ppe_rx_ext_handler;
+
+		ppe_hook_tx_ext = NULL;
+	}
+
+	pr_notice("****************************************\n");
+	pr_notice("[0] ETH_RX is %p, ETH_TX is %p\n",
+			ppe_hook_rx_eth, ppe_hook_tx_eth);
+	pr_notice("[1] MD_RX is %p,MD_TX is %p\n",
+			ppe_hook_rx_modem, ppe_hook_tx_modem);
+	pr_notice("[2] WIFI_RX is %p, WIFI_TX is %p\n",
+			ra_sw_nat_hook_rx, ra_sw_nat_hook_tx);
+	pr_notice("[2] WIFI_RX is %p, WIFI_TX is %p\n",
+			ppe_hook_rx_wifi, ppe_hook_tx_wifi);
+	pr_notice("[3] RNDIS_RX is %p, RNDIS_TX is %p\n",
+			ppe_hook_rx_rndis, ppe_hook_tx_rndis);
+	pr_notice("[4] EXT_RX is %p, EXT_TX_HOOK is %p\n",
+			ppe_hook_rx_ext, ppe_hook_tx_ext);
+	pr_notice("****************************************\n");
+
+}
+
+dev_t dev_hnat;
+static int32_t ppe_init_mod(void)
+{
+	struct platform_device *pdev;
+
+	NAT_PRINT("MEDIATEK HW NAT Module Enabled\n");
+	hwnat_config_setting();
+	fe_feature_setting();
+
+	fe_base = ioremap(MTK_FE_BASE, MTK_FE_RANGE);
+	med_base = ioremap(MTK_MED_BASE, MTK_FE_RANGE);
+
+	pdev = platform_device_alloc("HW_NAT", PLATFORM_DEVID_AUTO);
+	if (!pdev)
+		return -ENOMEM;
+
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	hwnat_setup_dma_ops(&pdev->dev, FALSE);
+
+	/* Set PPE FOE Hash Mode */
+	if (!ppe_setfoe_hash_mode(DFL_FOE_HASH_MODE, &pdev->dev)) {
+		pr_info("PPE0 memory allocation failed\n");
+		return -ENOMEM;	/* memory allocation failed */
+	}
+
+	/* Set PPE FOE Hash Mode */
+	if (!ppe1_setfoe_hash_mode(DFL_FOE_HASH_MODE, &pdev->dev)) {
+		pr_info("PPE1 memory allocation failed\n");
+		return -ENOMEM;	/* memory allocation failed */
+	}
+
+	/* Get net_device structure of Dest Port */
+	ppe_set_dst_port(1);
+
+	/* Register ioctl handler */
+	ppe_reg_ioctl_handler(dev_hnat);
+
+	ppe_eng_init();
+
+	/* In manual mode, PPE always reports UN-HIT CPU reason, so we don't need to process it */
+	/* Register RX/TX hook point */
+	if (!(fe_feature & MANUAL_MODE)) {
+		ppe_hook_rx_wifi = ppe_rx_wifi_handler;
+		ppe_hook_tx_wifi = ppe_tx_wifi_handler;
+		ra_sw_nat_hook_rx = ppe_rx_wifi_handler;
+		ra_sw_nat_hook_tx = ppe_tx_wifi_handler;
+#ifdef CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT
+		ppe_hook_rx_modem = ppe_rx_modem_handler;
+		ppe_hook_tx_modem = ppe_tx_modem_handler;
+#endif
+		ppe_hook_rx_rndis = ppe_rx_rndis_handler;
+		ppe_hook_tx_rndis = ppe_tx_rndis_handler;
+
+#ifdef	CONFIG_RAETH_EDMA
+		ppe_hook_rx_eth = NULL;
+		ppe_hook_tx_eth = NULL;
+#else
+		ppe_hook_rx_eth = ppe_rx_eth_handler;
+		ppe_hook_tx_eth = ppe_tx_eth_handler;
+#endif
+		ppe_hook_rx_ext = ppe_rx_ext_handler;
+		/* EDMA TX fast path is not ready */
+		ppe_hook_tx_ext = NULL;
+		ppe_get_dev_stats = ppe_get_dev_stats_handler;
+	}
+
+	ppe_dev_register_hook = ppe_dev_reg_handler;
+	ppe_dev_unregister_hook = ppe_dev_unreg_handler;
+
+	/* Set GMAC to forward packets to PPE */
+	set_gdma_fwd(1);
+
+	//register_netevent_notifier(&hnat_netevent_nb);
+	//init_timers(&hwnat_clear_entry_timer);
+	//hwnat_clear_entry_timer.function = hwnat_clear_entry;
+	//timer_setup(&hwnat_clear_entry_timer, hwnat_clear_entry, 0);
+
+	//if (hnat_chip_name & LEOPARD_HWNAT) {
+		//init_timer(&hnat_reset_timestamp_timer);
+		//hnat_reset_timestamp_timer.function = hnat_reset_timestamp;
+		//hnat_reset_timestamp_timer.expires  = jiffies;
+		//add_timer(&hnat_reset_timestamp_timer);
+	//}
+	/*if (fe_feature & HNAT_IPI)*/
+	/*	HnatIPIInit();*/
+	hnat_debug_proc_init();
+
+	if (fe_feature & SW_DVFS)
+		sw_dvfs_init();
+
+	ppe_init_mib_counter();
+	return 0;
+}
+
+static void ppe_cleanup_mod(void)
+{
+	NAT_PRINT("Ralink HW NAT Module Disabled\n");
+
+	/* Set GMAC to forward packets to CPU */
+	set_gdma_fwd(0);
+
+	/* Unregister RX/TX hook point */
+	ppe_hook_rx_wifi = NULL;
+	ppe_hook_tx_wifi = NULL;
+#ifdef CONFIG_MTK_TINYSYS_MEDMCU_SUPPORT
+	ppe_hook_rx_modem = NULL;
+	ppe_hook_tx_modem = NULL;
+#endif
+	ppe_hook_rx_rndis = NULL;
+	ppe_hook_tx_rndis = NULL;
+	ppe_hook_rx_eth = NULL;
+	ppe_hook_tx_eth = NULL;
+	ppe_hook_rx_ext = NULL;
+	ppe_hook_tx_ext = NULL;
+	ppe_dev_register_hook = NULL;
+	ppe_dev_unregister_hook = NULL;
+	ra_sw_nat_hook_rx = NULL;
+	ra_sw_nat_hook_tx = NULL;
+	ppe_get_dev_stats = NULL;
+
+	/* Restore PPE related register */
+	/* ppe_eng_stop(); */
+	/* iounmap(ppe_foe_base); */
+
+	/* Unregister ioctl handler */
+	ppe_unreg_ioctl_handler(dev_hnat);
+	//if ((fe_feature & HNAT_QDMA) && (fe_feature & HNAT_MCAST))
+		//foe_mcast_entry_del_all();
+
+	/* Release net_device structure of Dest Port */
+	ppe_set_dst_port(0);
+	//if (hnat_chip_name & LEOPARD_HWNAT)
+		//del_timer_sync(&hnat_reset_timestamp_timer);
+
+	//del_timer(&hwnat_clear_entry_timer);
+/*	if(fe_feature & HNAT_IPI)*/
+/*		HnatIPIDeInit();*/
+
+	//unregister_netevent_notifier(&hnat_netevent_nb);
+
+	if (fe_feature & SW_DVFS)
+		sw_dvfs_fini();
+
+	hnat_debug_proc_exit();
+	iounmap(fe_base);
+	iounmap(med_base);
+}
+
+module_init(ppe_init_mod);
+module_exit(ppe_cleanup_mod);
+
+MODULE_AUTHOR("Steven Liu/Kurtis Ke");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("MediaTek Hardware NAT");
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat.h b/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat.h
new file mode 100755
index 0000000..59b848b
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat.h
@@ -0,0 +1,650 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _RA_NAT_WANTED
+#define _RA_NAT_WANTED
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+
+#ifndef NEXTHDR_IPIP
+#define NEXTHDR_IPIP 4
+#endif
+
+#if defined(CONFIG_RA_NAT_HW)
+extern void hwnat_magic_tag_set_zero(struct sk_buff *skb);
+extern void hwnat_check_magic_tag(struct sk_buff *skb);
+extern void hwnat_set_headroom_zero(struct sk_buff *skb);
+extern void hwnat_set_tailroom_zero(struct sk_buff *skb);
+extern void hwnat_copy_headroom(u8 *data, struct sk_buff *skb);
+extern void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb);
+extern void hwnat_setup_dma_ops(struct device *dev, bool coherent);
+#else
+
+static inline void hwnat_magic_tag_set_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_check_magic_tag(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_set_headroom_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_set_tailroom_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_copy_headroom(u8 *data, struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb)
+{
+}
+
+#endif
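+
+/* Reasons the PPE hands a packet back to the CPU, as reported in the
+ * CRSN field of the RX descriptor (see FOE_AI()).
+ */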
+enum foe_cpu_reason {
+	TTL_0 = 0x02,		/* IPv4(IPv6) TTL(hop limit) = 0 */
+	/* IPv4(IPv6) has option(extension) header */
+	HAS_OPTION_HEADER = 0x03,
+	NO_FLOW_IS_ASSIGNED = 0x07,	/* No flow is assigned */
+	/* IPv4 HNAT doesn't support IPv4 /w fragment */
+	IPV4_WITH_FRAGMENT = 0x08,
+	/* IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment */
+	IPV4_HNAPT_DSLITE_WITH_FRAGMENT = 0x09,
+	/* IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport */
+	IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP = 0x0A,
+	/* IPv6 5T-route/6RD can't find TCP/UDP sport/dport */
+	IPV6_5T_6RD_WITHOUT_TCP_UDP = 0x0B,
+	/* Ingress packet is TCP fin/syn/rst */
+	/*(for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
+	TCP_FIN_SYN_RST = 0x0C,
+	UN_HIT = 0x0D,		/* FOE Un-hit */
+	HIT_UNBIND = 0x0E,	/* FOE Hit unbind */
+	/* FOE Hit unbind & rate reach */
+	HIT_UNBIND_RATE_REACH = 0x0F,
+	HIT_BIND_TCP_FIN = 0x10,	/* Hit bind PPE TCP FIN entry */
+	/* Hit bind PPE entry and TTL(hop limit) = 1 */
+	/* and TTL(hop limit) - 1 */
+	HIT_BIND_TTL_1 = 0x11,
+	/* Hit bind and VLAN replacement violation */
+	/*(Ingress 1(0) VLAN layers and egress 4(3 or 4) VLAN layers) */
+	HIT_BIND_WITH_VLAN_VIOLATION = 0x12,
+	/* Hit bind and keep alive with unicast old-header packet */
+	HIT_BIND_KEEPALIVE_UC_OLD_HDR = 0x13,
+	/* Hit bind and keep alive with multicast new-header packet */
+	HIT_BIND_KEEPALIVE_MC_NEW_HDR = 0x14,
+	/* Hit bind and keep alive with duplicate old-header packet */
+	HIT_BIND_KEEPALIVE_DUP_OLD_HDR = 0x15,
+	/* FOE Hit bind & force to CPU */
+	HIT_BIND_FORCE_TO_CPU = 0x16,
+	/* Hit bind and remove tunnel IP header, */
+	/* but inner IP has option/next header */
+	HIT_BIND_WITH_OPTION_HEADER = 0x17,
+	/* Hit bind and exceed MTU */
+	HIT_BIND_EXCEED_MTU = 0x1C,
+	HIT_BIND_PACKET_SAMPLING = 0x1B,	/*  PS packet */
+	/*  Switch clone multicast packet to CPU */
+	HIT_BIND_MULTICAST_TO_CPU = 0x18,
+	/*  Switch clone multicast packet to GMAC1 & CPU */
+	HIT_BIND_MULTICAST_TO_GMAC_CPU = 0x19,
+	HIT_PRE_BIND = 0x1A	/*  Pre-bind */
+};
+
+
+/*MT7622*/
+/*      2bytes	    4bytes	    3bytes        */
+/*   +-----------+--------------------+---------+ */
+/*   |  Magic Tag | RX/TX Desc info4  |wifi info |*/
+/*   +-----------+--------------------+---------+ */
+/*   |<-----------FOE Flow Info----------------->|*/
+
+#define MAX_IF_NUM 64
+
+/*Colgin head*/
+/*   +----------------------------------------------------------- */
+/*   | entry_idx(15) | cpu_reason(5)|source port(4)|alg(1)|rsv(7) */
+/*   ------------------------------------------------------------ */
+/*   |IF(8)|wifi info(18)|rsv(6)|minfo(16)|protect(16)*/
+
+
+/*Colgin tail*/
+/*   +-----------------------------------------------------------*/
+/*   | |protect(16)| entry_idx(15) | cpu_reason(5)|source port(4)|*/
+/*   ------------------------------------------------------------*/
+/*   |alg(1)|rsv(7)|IF(8)|wifi info(18)|rsv(6)|minfo(16)*/
+//tail room use
+
+/*
+	unsigned int FOE_ENTRY:15;
+	unsigned int RSV0:3;
+	unsigned int CRSN:5;
+	unsigned int RSV1:3;
+	unsigned int SP:4;
+	unsigned int RSV2:2;
+*/
+
+struct dmad_rx_descinfo4 {
+	uint32_t foe_entry_num:15;
+	uint32_t rsv0:3;
+	uint32_t CRSN:5;
+	uint32_t rsv1:3;
+	uint32_t SPORT:4;
+	uint32_t ppe:1;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv3:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t IF_IDX:4;
+	u16 MAGIC_TAG_PROTECT;
+} __packed;
+
+
+struct pdma_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT;
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t rsv:6;
+	uint32_t foe_entry_num_1:1;
+	uint32_t ppe:1;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv2:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t IF_IDX:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+} __packed;
+
+struct head_rx_descinfo4 {
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t rsv:6;
+	uint32_t foe_entry_num_1:1;
+	uint32_t ppe:1;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv2:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t IF_IDX:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT;
+} __packed;
+
+struct cb_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT0;
+	uint32_t foe_entry_num:15;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t ALG:1;
+	uint32_t rsv:7;
+	uint16_t IF:8;
+	uint16_t WDMAID:2;
+	uint16_t RXID:2;
+	uint16_t WCID:10;
+	uint16_t BSSID:6;
+	uint16_t rsv1:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t IF_IDX:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT1;
+} __packed;
+
+#define FOE_INFO_LEN		    12
+#define WIFI_INFO_LEN		    6
+
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#undef FOE_INFO_LEN
+#define FOE_INFO_LEN		    (6 + 4 + WIFI_INFO_LEN)
+#define FOE_MAGIC_FASTPATH	    0x77
+#define FOE_MAGIC_L2TPPATH	    0x78
+#endif
+
+#define FOE_MAGIC_PCI		    0x73
+#define FOE_MAGIC_WLAN		    0x74
+#define FOE_MAGIC_GE		    0x75
+#define FOE_MAGIC_PPE		    0x76
+#define FOE_MAGIC_WED0		    0x78
+#define FOE_MAGIC_WED1		    0x79
+#define FOE_MAGIC_MED		    0x80
+#define FOE_MAGIC_EDMA0		    0x81
+#define FOE_MAGIC_EDMA1		    0x82
+#define FOE_MAGIC_PPE0		    0x83
+#define FOE_MAGIC_PPE1		    0x84
+#define FOE_MAGIC_EDMARX	    0x85
+#define FOE_MAGIC_RNDIS		    0x86
+#define TAG_PROTECT                 0x6789
+#define USE_HEAD_ROOM               0
+#define USE_TAIL_ROOM               1
+#define USE_CB                      2
+#define ALL_INFO_ERROR              3
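+
+/* The FOE flow info can be stashed in the skb headroom, the tailroom or
+ * the control buffer (cb); each copy is guarded by the TAG_PROTECT magic
+ * so an overwritten region can be detected before it is trusted.
+ */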
+
+/**************************DMAD FORMAT********************************/
+#define FOE_TAG_PROTECT(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
+
+#define FOE_ENTRY_NUM(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->foe_entry_num)
+
+#define FOE_ALG(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->ALG)
+#define FOE_AI(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->CRSN)
+#define FOE_SP(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->SPORT)
+#define FOE_MAGIC_TAG(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->IF)
+#define FOE_IF_IDX(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->IF_IDX)
+#define FOE_WDMA_ID(skb)  \
+	(((struct dmad_rx_descinfo4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->RXID)
+#define FOE_WC_ID(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->BSSID)
+#define FOE_PPE(skb)	(((struct dmad_rx_descinfo4 *)((skb)->head))->ppe)
+
+/***********************HEAD FORMAT*************************************/
+
+#define FOE_TAG_PROTECT_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
+#define FOE_ENTRY_NUM_LSB_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num)
+#define FOE_ENTRY_NUM_MSB_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num_1)
+
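+/* The 15-bit FOE entry index is split in the descriptor: 14 LSBs in
+ * foe_entry_num plus one MSB in foe_entry_num_1; recombine before use.
+ */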
+#define FOE_ENTRY_NUM_HEAD(skb)  \
+	(((FOE_ENTRY_NUM_MSB_HEAD(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_HEAD(skb))
+
+
+#define FOE_ALG_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->ALG)
+#define FOE_AI_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->CRSN)
+#define FOE_SP_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->SPORT)
+#define FOE_MAGIC_TAG_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->IF)
+#define FOE_IF_IDX_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->IF_IDX)
+#define FOE_WDMA_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->RXID)
+#define FOE_WC_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->BSSID)
+#define FOE_PPE_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->ppe)
+
+/****************************TAIL FORMAT***************************************/
+
+#define FOE_TAG_PROTECT_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->MAGIC_TAG_PROTECT)
+#define FOE_ENTRY_NUM_LSB_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num)
+
+#define FOE_ENTRY_NUM_MSB_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num_1)
+#define FOE_ENTRY_NUM_TAIL(skb)  \
+	(((FOE_ENTRY_NUM_MSB_TAIL(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_TAIL(skb))
+#define FOE_ALG_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ALG)
+#define FOE_AI_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->CRSN)
+#define FOE_SP_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SPORT)
+#define FOE_MAGIC_TAG_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->IF)
+#define FOE_IF_IDX_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->IF_IDX)
+#define FOE_WDMA_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WDMAID)
+#define FOE_RX_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->RXID)
+#define FOE_WC_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WCID)
+#define FOE_BSS_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->BSSID)
+
+#define FOE_PPE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ppe)
+/*********************************************************************/
+
+
+#define FOE_WDMA_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->RXID)
+#define FOE_WC_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->BSSID)
+
+
+#define FOE_MINFO(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->minfo)
+#define FOE_MINFO_NTYPE(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->ntype)
+#define FOE_MINFO_CHID(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->chid)
+#define FOE_MINFO_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->minfo)
+#define FOE_MINFO_NTYPE_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->ntype)
+#define FOE_MINFO_CHID_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->chid)
+
+#define FOE_MINFO_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->minfo)
+#define FOE_MINFO_NTYPE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ntype)
+#define FOE_MINFO_CHID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->chid)
+
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
+#define FOE_DEST(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->DEST)
+#endif
+
+#define IS_SPACE_AVAILABLE_HEAD(skb)  \
+	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
+#define FOE_INFO_START_ADDR_HEAD(skb)	(skb->head)
+
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
+#define FOE_DEST_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->DEST)
+#endif
+
+#define IS_SPACE_AVAILABLE_TAIL(skb)  \
+	(((skb_tailroom(skb) >= FOE_INFO_LEN) ? 1 : 0))
+#define FOE_INFO_START_ADDR_TAIL(skb)  \
+	((unsigned char *)(long)(skb_end_pointer(skb) - FOE_INFO_LEN))
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SOURCE)
+#define FOE_DEST_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->DEST)
+#endif
+
+/* change the position of skb_CB if necessary */
+#define CB_OFFSET		    40
+#define IS_SPACE_AVAILABLE_CB(skb)    1
+#define FOE_INFO_START_ADDR_CB(skb)    (skb->cb +  CB_OFFSET)
+#define FOE_TAG_PROTECT_CB0(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT0)
+#define FOE_TAG_PROTECT_CB1(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT1)
+#define FOE_ENTRY_NUM_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->foe_entry_num)
+#define FOE_ALG_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->ALG)
+#define FOE_AI_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->CRSN)
+#define FOE_SP_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SPORT)
+#define FOE_MAGIC_TAG_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->IF)
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_CB(skb)	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SOURCE)
+#define FOE_DEST_CB(skb)	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->DEST)
+#endif
+
+#define IS_MAGIC_TAG_PROTECT_VALID(skb)  \
+	(FOE_TAG_PROTECT(skb) == TAG_PROTECT)
+#define IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)  \
+	(FOE_TAG_PROTECT_HEAD(skb) == TAG_PROTECT)
+#define IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)  \
+	(FOE_TAG_PROTECT_TAIL(skb) == TAG_PROTECT)
+#define IS_MAGIC_TAG_PROTECT_VALID_CB(skb)  \
+	((FOE_TAG_PROTECT_CB0(skb) == TAG_PROTECT) && \
+	(FOE_TAG_PROTECT_CB0(skb) == FOE_TAG_PROTECT_CB1(skb)))
+
+#define IS_IF_PCIE_WLAN_HEAD(skb)  \
+	((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_RNDIS) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_EDMARX))
+
+#define IS_IF_PCIE_WLAN_TAIL(skb)  \
+	((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) || \
+	(FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_RNDIS))
+
+#define IS_IF_PCIE_WLAN_CB(skb)  \
+	((FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_WLAN) || \
+	(FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_RNDIS))
+
+/* macros */
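+/* magic_tag_set_zero(): clear both the headroom and tailroom interface
+ * magic tags (PCI/WLAN/RNDIS/GE) so stale FOE info cannot be trusted on
+ * a later pass through the stack.
+ */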
+#define magic_tag_set_zero(skb) \
+{ \
+	if ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_RNDIS) || \
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE)) { \
+		if (IS_SPACE_AVAILABLE_HEAD(skb)) \
+			FOE_MAGIC_TAG_HEAD(skb) = 0; \
+	} \
+	if ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) || \
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_RNDIS) || \
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_GE)) { \
+		if (IS_SPACE_AVAILABLE_TAIL(skb)) \
+			FOE_MAGIC_TAG_TAIL(skb) = 0; \
+	} \
+}
+
+static inline void hwnat_set_l2tp_unhit(struct iphdr *iph, struct sk_buff *skb)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	/* only clear headroom for TCP or non-L2TP packets */
+	if ((iph->protocol == 0x6) || (ntohs(udp_hdr(skb)->dest) != 1701)) {
+		if (IS_SPACE_AVAILABLE_HEAD(skb)) {
+			FOE_MAGIC_TAG(skb) = 0;
+			FOE_AI(skb) = UN_HIT;
+		}
+	}
+#endif
+}
+
+static inline void hwnat_set_l2tp_fast_path(u32 l2tp_fast_path, u32 pptp_fast_path)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	l2tp_fast_path = 1;
+	pptp_fast_path = 0;
+#endif
+}
+
+static inline void hwnat_clear_l2tp_fast_path(u32 l2tp_fast_path)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	l2tp_fast_path = 0;
+#endif
+}
+
+/* #define CONFIG_HW_NAT_IPI */
+#if defined(CONFIG_HW_NAT_IPI)
+extern int debug_level;
+int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+		struct rps_dev_flow **rflowp);
+uint32_t ppe_extif_rx_handler(struct sk_buff *skb);
+int hitbind_force_to_cpu_handler(struct sk_buff *skb, struct foe_entry *entry);
+extern unsigned int ipidbg[num_possible_cpus()][10];
+extern unsigned int ipidbg2[num_possible_cpus()][10];
+/* #define HNAT_IPI_RXQUEUE	1 */
+#define HNAT_IPI_DQ		1
+#define HNAT_IPI_HASH_NORMAL	0
+#define HNAT_IPI_HASH_VTAG		1
+#define HNAT_IPI_HASH_FROM_EXTIF	2
+#define HNAT_IPI_HASH_FROM_GMAC		4
+
+struct hnat_ipi_s {
+#if defined(HNAT_IPI_DQ)
+	struct sk_buff_head     skb_input_queue;
+	struct sk_buff_head     skb_process_queue;
+#elif defined(HNAT_IPI_RXQUEUE)
+	atomic_t rx_queue_num;
+	unsigned int rx_queue_ridx;
+	unsigned int rx_queue_widx;
+	struct sk_buff **rx_queue;
+#else
+	/* unsigned int dummy0[0]; */
+	struct sk_buff_head     skb_ipi_queue;
+	/* unsigned int dummy1[8]; */
+#endif
+	unsigned long time_rec, recv_time;
+	unsigned int ipi_accum;
+	/*hwnat ipi use*/
+	spinlock_t      ipilock;
+	struct tasklet_struct smp_func_call_tsk;
+} ____cacheline_aligned_in_smp;
+
+struct hnat_ipi_stat {
+	unsigned long drop_pkt_num_from_extif;
+	unsigned long drop_pkt_num_from_ppehit;
+	unsigned int smp_call_cnt_from_extif;
+	unsigned int smp_call_cnt_from_ppehit;
+	atomic_t cpu_status;
+	/* atomic_t cpu_status_from_extif; */
+	/* atomic_t cpu_status_from_ppehit; */
+
+	/* atomic_t hook_status_from_extif; */
+	/* atomic_t hook_status_from_ppehit; */
+} ____cacheline_aligned_in_smp;
+
+#define cpu_status_from_extif	cpu_status
+#define cpu_status_from_ppehit	cpu_status
+
+struct hnat_ipi_cfg {
+	unsigned int enable_from_extif;
+	unsigned int enable_from_ppehit;
+	unsigned int queue_thresh_from_extif;
+	unsigned int queue_thresh_from_ppehit;
+	unsigned int drop_pkt_from_extif;
+	unsigned int drop_pkt_from_ppehit;
+	unsigned int ipi_cnt_mod_from_extif;
+	unsigned int ipi_cnt_mod_from_ppehit;
+} ____cacheline_aligned_in_smp;
+
+int hnat_ipi_init(void);
+int hnat_ipi_de_init(void);
+#endif
+
+#define QDMA_RX		5
+#define PDMA_RX		0
+
+int ppe_reg_ioctl_handler(dev_t dev);
+void ppe_unreg_ioctl_handler(dev_t dev);
+
+void sw_dvfs_init(void);
+void sw_dvfs_fini(void);
+
+void ppe_modify_hook(bool clear, unsigned char hook_id, int dir);
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat_edma.c b/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat_edma.c
new file mode 100755
index 0000000..a262d13
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/ra_nat_edma.c
@@ -0,0 +1,715 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _RA_NAT_WANTED
+#define _RA_NAT_WANTED
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+
+#ifndef NEXTHDR_IPIP
+#define NEXTHDR_IPIP 4
+#endif
+
+#define hwnat_vlan_tx_tag_present(__skb)     ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define hwnat_vlan_tag_get(__skb)         ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+
+#if defined(CONFIG_RA_NAT_HW)
+extern void hwnat_magic_tag_set_zero(struct sk_buff *skb);
+extern void hwnat_check_magic_tag(struct sk_buff *skb);
+extern void hwnat_set_headroom_zero(struct sk_buff *skb);
+extern void hwnat_set_tailroom_zero(struct sk_buff *skb);
+extern void hwnat_copy_headroom(u8 *data, struct sk_buff *skb);
+extern void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb);
+extern void hwnat_setup_dma_ops(struct device *dev, bool coherent);
+#else
+
+static inline void hwnat_magic_tag_set_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_check_magic_tag(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_set_headroom_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_set_tailroom_zero(struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_copy_headroom(u8 *data, struct sk_buff *skb)
+{
+}
+
+static inline void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb)
+{
+}
+
+#endif
+enum foe_cpu_reason {
+	TTL_0 = 0x02,		/* IPv4(IPv6) TTL(hop limit) = 0 */
+	/* IPv4(IPv6) has option(extension) header */
+	HAS_OPTION_HEADER = 0x03,
+	NO_FLOW_IS_ASSIGNED = 0x07,	/* No flow is assigned */
+	/* IPv4 HNAT doesn't support IPv4 /w fragment */
+	IPV4_WITH_FRAGMENT = 0x08,
+	/* IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment */
+	IPV4_HNAPT_DSLITE_WITH_FRAGMENT = 0x09,
+	/* IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport */
+	IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP = 0x0A,
+	/* IPv6 5T-route/6RD can't find TCP/UDP sport/dport */
+	IPV6_5T_6RD_WITHOUT_TCP_UDP = 0x0B,
+	/* Ingress packet is TCP fin/syn/rst */
+	/*(for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
+	TCP_FIN_SYN_RST = 0x0C,
+	UN_HIT = 0x0D,		/* FOE Un-hit */
+	HIT_UNBIND = 0x0E,	/* FOE Hit unbind */
+	/* FOE Hit unbind & rate reach */
+	HIT_UNBIND_RATE_REACH = 0x0F,
+	HIT_BIND_TCP_FIN = 0x10,	/* Hit bind PPE TCP FIN entry */
+	/* Hit bind PPE entry and TTL(hop limit) = 1 */
+	/* and TTL(hop limit) - 1 */
+	HIT_BIND_TTL_1 = 0x11,
+	/* Hit bind and VLAN replacement violation */
+	/*(Ingress 1(0) VLAN layers and egress 4(3 or 4) VLAN layers) */
+	HIT_BIND_WITH_VLAN_VIOLATION = 0x12,
+	/* Hit bind and keep alive with unicast old-header packet */
+	HIT_BIND_KEEPALIVE_UC_OLD_HDR = 0x13,
+	/* Hit bind and keep alive with multicast new-header packet */
+	HIT_BIND_KEEPALIVE_MC_NEW_HDR = 0x14,
+	/* Hit bind and keep alive with duplicate old-header packet */
+	HIT_BIND_KEEPALIVE_DUP_OLD_HDR = 0x15,
+	/* FOE Hit bind & force to CPU */
+	HIT_BIND_FORCE_TO_CPU = 0x16,
+	/* Hit bind and remove tunnel IP header, */
+	/* but inner IP has option/next header */
+	HIT_BIND_WITH_OPTION_HEADER = 0x17,
+	/* Hit bind and exceed MTU */
+	HIT_BIND_EXCEED_MTU = 0x1C,
+	HIT_BIND_PACKET_SAMPLING = 0x1B,	/*  PS packet */
+	/*  Switch clone multicast packet to CPU */
+	HIT_BIND_MULTICAST_TO_CPU = 0x18,
+	/*  Switch clone multicast packet to GMAC1 & CPU */
+	HIT_BIND_MULTICAST_TO_GMAC_CPU = 0x19,
+	HIT_PRE_BIND = 0x1A	/*  Pre-bind */
+};
+
+/* non-MT7622 (legacy layout) */
+/*    2bytes	    4bytes          */
+/* +-----------+-------------------+*/
+/* | Magic Tag | RX/TX Desc info4  |*/
+/* +-----------+-------------------+*/
+/* |<------FOE Flow Info---------->|*/
+
+/*MT7622*/
+/*      2bytes	    4bytes	    3bytes        */
+/*   +-----------+--------------------+---------+ */
+/*   |  Magic Tag | RX/TX Desc info4  |wifi info |*/
+/*   +-----------+--------------------+---------+ */
+/*   |<-----------FOE Flow Info----------------->|*/
+
+#define MAX_IF_NUM 64
+#if !defined(CONFIG_ARCH_COLGIN)
+struct pdma_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT;
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t ALG:1;
+	uint16_t IF:8;
+	u8 WDMAID;
+	uint16_t RXID:2;
+	uint16_t WCID:8;
+	uint16_t BSSID:6;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+} __packed;
+
+struct head_rx_descinfo4 {
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	u8 WDMAID;
+	uint16_t RXID:2;
+	uint16_t WCID:8;
+	uint16_t BSSID:6;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT;
+} __packed;
+
+struct cb_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT0;
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:3;
+	uint32_t rsv:1;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	u16 MAGIC_TAG_PROTECT1;
+	u8 WDMAID;
+	uint16_t RXID:2;
+	uint16_t WCID:8;
+	uint16_t BSSID:6;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+} __packed;
+#else
+/*Colgin head*/
+/*   +----------------------------------------------------------- */
+/*   | entry_idx(15) | cpu_reason(5)|source port(4)|alg(1)|rsv(7) */
+/*   ------------------------------------------------------------ */
+/*   |IF(8)|wifi info(18)|rsv(6)|minfo(16)|protect(16)*/
+
+
+/*Colgin tail*/
+/*   +-----------------------------------------------------------*/
+/*   | |protect(16)| entry_idx(15) | cpu_reason(5)|source port(4)|*/
+/*   ------------------------------------------------------------*/
+/*   |alg(1)|rsv(7)|IF(8)|wifi info(18)|rsv(6)|minfo(16)*/
+//tail room use
+
+struct pdma_rx_desc_info4_eth {
+	u16 MAGIC_TAG_PROTECT;
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;	
+	uint32_t rsv:6;
+	uint32_t foe_entry_num_1:1;
+	uint32_t ALG:2;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;	
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv2:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv3:4;					
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+} __packed;
+
+struct head_rx_descinfo4_eth {
+	uint32_t foe_entry_num:14;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t rsv:6;
+	uint32_t foe_entry_num_1:1;
+	uint32_t ALG:2;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;	
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv2:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv3:4;	
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT;
+} __packed;
+/*
+	unsigned int FOE_ENTRY:15;
+	unsigned int RSV0:3;
+	unsigned int CRSN:5;
+	unsigned int RSV1:3;
+	unsigned int SP:4;
+	unsigned int RSV2:2;
+*/
+
+struct pdma_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT;
+	uint32_t foe_entry_num:15;
+	uint32_t rsv0:3;
+	uint32_t CRSN:5;
+	uint32_t rsv1:3;
+	uint32_t SPORT:4;
+	uint32_t rsv2:2;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv3:3;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv4:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+} __packed;
+
+struct head_rx_descinfo4 {
+	uint32_t foe_entry_num:15;
+	uint32_t rsv0:3;
+	uint32_t CRSN:5;
+	uint32_t rsv1:3;
+	uint32_t SPORT:4;
+	uint32_t rsv2:2;
+	uint32_t ALG:1;
+	uint32_t IF:8;
+	uint32_t WDMAID:2;
+	uint32_t RXID:2;
+	uint32_t WCID:10;
+	uint32_t BSSID:6;
+	uint32_t rsv3:3;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv4:4;
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT;
+} __packed;
+
+
+struct cb_rx_desc_info4 {
+	u16 MAGIC_TAG_PROTECT0;
+	uint32_t foe_entry_num:15;
+	uint32_t CRSN:5;
+	uint32_t SPORT:4;
+	uint32_t ALG:1;
+	uint32_t rsv:7;	
+	uint16_t IF:8;
+	uint16_t WDMAID:2;	
+	uint16_t RXID:2;
+	uint16_t WCID:10;
+	uint16_t BSSID:6;
+	uint16_t rsv1:4;
+	uint16_t minfo:1;
+	uint16_t ntype:3;
+	uint16_t chid:8;
+	uint16_t rsv2:4;	
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	u16 SOURCE;
+	u16 DEST;
+#endif
+	u16 MAGIC_TAG_PROTECT1;
+} __packed;
+#endif
+
+#if defined(CONFIG_ARCH_COLGIN)
+#define FOE_INFO_LEN		    12
+#define WIFI_INFO_LEN		    6
+#else
+#define FOE_INFO_LEN		    9
+#define WIFI_INFO_LEN		    3
+#endif
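+
+/* Colgin descriptors carry the wider wifi/minfo fields, so the stashed
+ * FOE info grows from 9 to 12 bytes (wifi info from 3 to 6 bytes).
+ */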
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#undef FOE_INFO_LEN
+#define FOE_INFO_LEN		    (6 + 4 + WIFI_INFO_LEN)
+#define FOE_MAGIC_FASTPATH	    0x77
+#define FOE_MAGIC_L2TPPATH	    0x78
+#endif
+
+#define FOE_MAGIC_PCI		    0x73
+#define FOE_MAGIC_WLAN		    0x74
+#define FOE_MAGIC_GE		    0x75
+#define FOE_MAGIC_PPE		    0x76
+#define FOE_MAGIC_WED0		    0x78
+#define FOE_MAGIC_WED1		    0x79
+#define FOE_MAGIC_MED		    0x80
+#define FOE_MAGIC_EDMA0		    0x81
+#define FOE_MAGIC_EDMA1		    0x82
+#define TAG_PROTECT                 0x6789
+#define USE_HEAD_ROOM               0
+#define USE_TAIL_ROOM               1
+#define USE_CB                      2
+#define ALL_INFO_ERROR              3
+
+#define FOE_TAG_PROTECT(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
+
+#define FOE_ENTRY_NUM_LSB(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num)
+
+#define FOE_ENTRY_NUM_MSB(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num_1)
+
+#define FOE_ENTRY_NUM(skb)  \
+	(((FOE_ENTRY_NUM_MSB(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB(skb))
+
+
+#define FOE_ALG(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->ALG)
+#define FOE_AI(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->CRSN)
+#define FOE_SP(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->SPORT)
+#define FOE_MAGIC_TAG(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->IF)
+
+
+#define FOE_WDMA_ID(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->RXID)
+#define FOE_WC_ID(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->BSSID)
+#define FOE_WDMA_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->RXID)
+#define FOE_WC_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->BSSID)
+#define FOE_WDMA_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WDMAID)
+#define FOE_RX_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->RXID)
+#define FOE_WC_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WCID)
+#define FOE_BSS_ID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->BSSID)
+#define FOE_WDMA_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->WDMAID)
+#define FOE_RX_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->RXID)
+#define FOE_WC_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->WCID)
+#define FOE_BSS_ID_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->head))->BSSID)
+
+
+#if defined(CONFIG_ARCH_COLGIN)
+
+
+#define FOE_MINFO(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->minfo)
+#define FOE_MINFO_NTYPE(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->ntype)
+#define FOE_MINFO_CHID(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->chid)
+#define FOE_MINFO_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->minfo)
+#define FOE_MINFO_NTYPE_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->ntype)
+#define FOE_MINFO_CHID_HEAD(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->chid)
+
+#define FOE_MINFO_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->minfo)
+#define FOE_MINFO_NTYPE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ntype)
+#define FOE_MINFO_CHID_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->chid)
+
+#endif
+
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
+#define FOE_DEST(skb)	(((struct head_rx_descinfo4 *)((skb)->head))->DEST)
+#endif
+
+#define IS_SPACE_AVAILABLE_HEAD(skb)  \
+	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
+#define FOE_INFO_START_ADDR_HEAD(skb)	(skb->head)
+
+#define FOE_TAG_PROTECT_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
+#define FOE_ENTRY_NUM_LSB_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num)
+#define FOE_ENTRY_NUM_MSB_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num_1)
+
+#define FOE_ENTRY_NUM_HEAD(skb)  \
+	(((FOE_ENTRY_NUM_MSB_HEAD(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_HEAD(skb))
+
+
+#define FOE_ALG_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->ALG)
+#define FOE_AI_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->CRSN)
+#define FOE_SP_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->SPORT)
+#define FOE_MAGIC_TAG_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->IF)
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
+#define FOE_DEST_HEAD(skb)  \
+	(((struct head_rx_descinfo4 *)((skb)->head))->DEST)
+#endif
+
+#define IS_SPACE_AVAILABLE_TAIL(skb)  \
+	(((skb_tailroom(skb) >= FOE_INFO_LEN) ? 1 : 0))
+#define FOE_INFO_START_ADDR_TAIL(skb)  \
+	((unsigned char *)(long)(skb_end_pointer(skb) - FOE_INFO_LEN))
+
+#define FOE_TAG_PROTECT_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->MAGIC_TAG_PROTECT)
+#define FOE_ENTRY_NUM_LSB_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num)
+
+#define FOE_ENTRY_NUM_MSB_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num_1)
+#define FOE_ENTRY_NUM_TAIL(skb)  \
+	(((FOE_ENTRY_NUM_MSB_TAIL(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_TAIL(skb))
+#define FOE_ALG_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ALG)
+#define FOE_AI_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->CRSN)
+#define FOE_SP_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SPORT)
+#define FOE_MAGIC_TAG_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->IF)
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SOURCE)
+#define FOE_DEST_TAIL(skb)  \
+	(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->DEST)
+#endif
+
+/* change the position of skb_CB if necessary */
+#define CB_OFFSET		    40
+#define IS_SPACE_AVAILABLE_CB(skb)    1
+#define FOE_INFO_START_ADDR_CB(skb)    (skb->cb +  CB_OFFSET)
+#define FOE_TAG_PROTECT_CB0(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT0)
+#define FOE_TAG_PROTECT_CB1(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT1)
+#define FOE_ENTRY_NUM_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->foe_entry_num)
+#define FOE_ALG_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->ALG)
+#define FOE_AI_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->CRSN)
+#define FOE_SP_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SPORT)
+#define FOE_MAGIC_TAG_CB(skb)  \
+	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->IF)
+
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+#define FOE_SOURCE_CB(skb)	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SOURCE)
+#define FOE_DEST_CB(skb)	(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->DEST)
+#endif
+
+#define IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb)  \
+	(FOE_TAG_PROTECT_HEAD(skb) == TAG_PROTECT)
+#define IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb)  \
+	(FOE_TAG_PROTECT_TAIL(skb) == TAG_PROTECT)
+#define IS_MAGIC_TAG_PROTECT_VALID_CB(skb)  \
+	((FOE_TAG_PROTECT_CB0(skb) == TAG_PROTECT) && \
+	(FOE_TAG_PROTECT_CB0(skb) == FOE_TAG_PROTECT_CB1(skb)))
+
+#define IS_IF_PCIE_WLAN_HEAD(skb)  \
+	((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
+	(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE))
+
+#define IS_IF_PCIE_WLAN_TAIL(skb)  \
+	((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN))
+
+#define IS_IF_PCIE_WLAN_CB(skb)  \
+	((FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_PCI) || \
+	(FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_WLAN))
+
+/* macros */
+#define magic_tag_set_zero(skb) \
+{ \
+	if ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
+	    (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE)) { \
+		if (IS_SPACE_AVAILABLE_HEAD(skb)) \
+			FOE_MAGIC_TAG_HEAD(skb) = 0; \
+	} \
+	if ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) || \
+	    (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_GE)) { \
+		if (IS_SPACE_AVAILABLE_TAIL(skb)) \
+			FOE_MAGIC_TAG_TAIL(skb) = 0; \
+	} \
+}
+
+static inline void hwnat_set_l2tp_unhit(struct iphdr *iph, struct sk_buff *skb)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	/* only clear headroom for TCP or non-L2TP packets */
+	if ((iph->protocol == 0x6) || (ntohs(udp_hdr(skb)->dest) != 1701)) {
+		if (IS_SPACE_AVAILABLE_HEAD(skb)) {
+			FOE_MAGIC_TAG(skb) = 0;
+			FOE_AI(skb) = UN_HIT;
+		}
+	}
+#endif
+}
+
+static inline void hwnat_set_l2tp_fast_path(u32 l2tp_fast_path, u32 pptp_fast_path)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	l2tp_fast_path = 1;
+	pptp_fast_path = 0;
+#endif
+}
+
+static inline void hwnat_clear_l2tp_fast_path(u32 l2tp_fast_path)
+{
+#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
+	l2tp_fast_path = 0;
+#endif
+}
+
+/* #define CONFIG_HW_NAT_IPI */
+#if defined(CONFIG_HW_NAT_IPI)
+extern int debug_level;
+int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+		struct rps_dev_flow **rflowp);
+uint32_t ppe_extif_rx_handler(struct sk_buff *skb);
+int hitbind_force_to_cpu_handler(struct sk_buff *skb, struct foe_entry *entry);
+extern unsigned int ipidbg[num_possible_cpus()][10];
+extern unsigned int ipidbg2[num_possible_cpus()][10];
+/* #define HNAT_IPI_RXQUEUE	1 */
+#define HNAT_IPI_DQ		1
+#define HNAT_IPI_HASH_NORMAL	0
+#define HNAT_IPI_HASH_VTAG		1
+#define HNAT_IPI_HASH_FROM_EXTIF	2
+#define HNAT_IPI_HASH_FROM_GMAC		4
+
+struct hnat_ipi_s {
+#if defined(HNAT_IPI_DQ)
+	struct sk_buff_head     skb_input_queue;
+	struct sk_buff_head     skb_process_queue;
+#elif defined(HNAT_IPI_RXQUEUE)
+	atomic_t rx_queue_num;
+	unsigned int rx_queue_ridx;
+	unsigned int rx_queue_widx;
+	struct sk_buff **rx_queue;
+#else
+	/* unsigned int dummy0[0]; */
+	struct sk_buff_head     skb_ipi_queue;
+	/* unsigned int dummy1[8]; */
+#endif
+	unsigned long time_rec, recv_time;
+	unsigned int ipi_accum;
+	/*hwnat ipi use*/
+	spinlock_t      ipilock;
+	struct tasklet_struct smp_func_call_tsk;
+} ____cacheline_aligned_in_smp;
+
+struct hnat_ipi_stat {
+	unsigned long drop_pkt_num_from_extif;
+	unsigned long drop_pkt_num_from_ppehit;
+	unsigned int smp_call_cnt_from_extif;
+	unsigned int smp_call_cnt_from_ppehit;
+	atomic_t cpu_status;
+	/* atomic_t cpu_status_from_extif; */
+	/* atomic_t cpu_status_from_ppehit; */
+
+	/* atomic_t hook_status_from_extif; */
+	/* atomic_t hook_status_from_ppehit; */
+} ____cacheline_aligned_in_smp;
+
+#define cpu_status_from_extif	cpu_status
+#define cpu_status_from_ppehit	cpu_status
+
+struct hnat_ipi_cfg {
+	unsigned int enable_from_extif;
+	unsigned int enable_from_ppehit;
+	unsigned int queue_thresh_from_extif;
+	unsigned int queue_thresh_from_ppehit;
+	unsigned int drop_pkt_from_extif;
+	unsigned int drop_pkt_from_ppehit;
+	unsigned int ipi_cnt_mod_from_extif;
+	unsigned int ipi_cnt_mod_from_ppehit;
+} ____cacheline_aligned_in_smp;
+
+int hnat_ipi_init(void);
+int hnat_ipi_de_init(void);
+#endif
+
+#define QDMA_RX		5
+#define PDMA_RX		0
+
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/util.c b/src/kernel/modules/netsys_driver/nat/hw_nat/util.c
new file mode 100644
index 0000000..ddc014c
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/util.c
@@ -0,0 +1,244 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>	/* ether_addr_copy() */
+
+#include "util.h"
+
+void mac_reverse(uint8_t *mac)
+{
+	u8 tmp;
+	u8 i;
+
+	for (i = 5; i > 2; i--) {
+		tmp = mac[i];
+		mac[i] = mac[5 - i];
+		mac[5 - i] = tmp;
+	}
+}
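+
+/* Example: the loop swaps mac[5]<->mac[0], mac[4]<->mac[1] and
+ * mac[3]<->mac[2], so 00:11:22:33:44:55 becomes 55:44:33:22:11:00.
+ */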
+
+int get_next(char *src, int separator, char *dest)
+{
+	char *c;
+	int len = 0;
+
+	if ((!src) || (!dest))
+		return -1;
+
+	c = strchr(src, separator);
+	if (!c)	/* separator not found: nothing to copy */
+		return -1;
+	len = c - src;
+	strncpy(dest, src, len);
+	dest[len] = '\0';
+	return len + 1;
+}
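+
+/* Example: get_next("192.168.1.1", '.', buf) copies "192" into buf and
+ * returns 4 (token length plus separator), so the caller can advance
+ * the source pointer by the return value.
+ */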
+
+static inline int atoi(char *s)
+{
+	int i = 0;
+
+	while (isdigit((unsigned char)*s))
+		i = i * 10 + *(s++) - '0';
+	return i;
+}
+
+/* Convert an IPv4 address from a host-order u32 to dotted-decimal form.
+ * Returns a static buffer, so this helper is not re-entrant.
+ */
+uint8_t *ip_to_str(IN uint32_t ip)
+{
+	static u8 buf[32];
+	u8 *ptr = (u8 *)&ip;
+	u8 c[4];
+
+	c[0] = *(ptr);
+	c[1] = *(ptr + 1);
+	c[2] = *(ptr + 2);
+	c[3] = *(ptr + 3);
+	sprintf((char *)buf, "%d.%d.%d.%d", c[3], c[2], c[1], c[0]);
+	return buf;
+}
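+
+/* Example (little-endian host): ip_to_str(0xC0A80101) returns
+ * "192.168.1.1" -- c[3] holds the most significant byte.
+ */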
+
+unsigned int str_to_ip(IN char *str)
+{
+	int len;
+	char *ptr = str;
+	char buf[128];
+	unsigned char c[4];
+	int i;
+
+	for (i = 0; i < 3; ++i) {
+		len = get_next(ptr, '.', buf);
+		if (len == -1)
+			return 1;	/* parsing error */
+		c[i] = atoi(buf);
+		ptr += len;
+	}
+	c[3] = atoi(ptr);
+	return ((c[0] << 24) + (c[1] << 16) + (c[2] << 8) + c[3]);
+}
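+
+/* Example: str_to_ip("192.168.1.1") == 0xC0A80101. Note the error path
+ * returns 1, which is indistinguishable from the valid address 0.0.0.1.
+ */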
+
+/* Encode the IPv4 range start_ip <= x <= end_ip as M * 2^E
+ * (range size = end_ip - start_ip + 1)
+ */
+void cal_ip_range(u32 start_ip, uint32_t end_ip, uint8_t *M, uint8_t *E)
+{
+	u32 range = (end_ip + 1) - start_ip;
+	u32 i;
+
+	for (i = 0; i < 32; i++) {
+		if ((range >> i) & 0x01)
+			break;
+	}
+
+	if (i != 32) {
+		*M = range >> i;
+		*E = i;
+	} else {
+		*M = 0;
+		*E = 0;
+	}
+}
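+
+/* Example: start_ip = 192.168.1.0, end_ip = 192.168.1.255 gives
+ * range = 256 = 1 << 8, so *M = 1 and *E = 8 (range = M * 2^E).
+ */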
+
+void reg_modify_bits(unsigned int *addr, uint32_t data, uint32_t offset, uint32_t len)
+{
+	unsigned int mask = 0;
+	unsigned int value;
+	unsigned int i;
+
+	for (i = 0; i < len; i++)
+		mask |= 1 << (offset + i);
+
+	value = reg_read(addr);
+	value &= ~mask;
+	value |= (data << offset) & mask;
+	reg_write(addr, value);
+}
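+
+/* Example: reg_modify_bits(addr, 0x5, 4, 3) builds mask 0x70 and
+ * rewrites bits [6:4] of the register to 0b101, leaving all other
+ * bits intact.
+ */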
+
+static inline uint16_t csum_part(u32 o, uint32_t n, uint16_t old)
+{
+	u32 d[] = { o, n };
+
+	return csum_fold(csum_partial((char *)d, sizeof(d), old ^ 0xFFFF));
+}
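+
+/* Incremental checksum update (RFC 1624): csum_part() folds the words
+ * 'o' (the old value, complemented by the caller) and 'n' (the new
+ * value) into the unfolded complement of the old checksum, i.e.
+ * HC' = ~(~HC + ~m + m'). This is why the callers below pass
+ * old ^ 0xffff for 16-bit fields and ~old for 32-bit fields.
+ */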
+
+/* In keep-alive "new header" mode the PPE passes the modified packet to
+ * the CPU. Restore the original packet so the kernel NAT table is
+ * refreshed: recover the TCP src/dst ports and recalculate the TCP
+ * checksum.
+ */
+void
+foe_to_org_tcphdr(IN struct foe_entry *entry, IN struct iphdr *iph,
+		  OUT struct tcphdr *th)
+{
+	/* TODO: how to recover 6rd/ds-lite packets */
+	th->check =
+	    csum_part((th->source) ^ 0xffff,
+		      htons(entry->ipv4_hnapt.sport), th->check);
+	th->check =
+	    csum_part((th->dest) ^ 0xffff,
+		      htons(entry->ipv4_hnapt.dport), th->check);
+	th->check =
+	    csum_part(~(iph->saddr), htonl(entry->ipv4_hnapt.sip),
+		      th->check);
+	th->check =
+	    csum_part(~(iph->daddr), htonl(entry->ipv4_hnapt.dip),
+		      th->check);
+	th->source = htons(entry->ipv4_hnapt.sport);
+	th->dest = htons(entry->ipv4_hnapt.dport);
+}
+
+/* Recover UDP Src/Dst Port and recalculate udp checksum */
+
+void
+foe_to_org_udphdr(IN struct foe_entry *entry, IN struct iphdr *iph,
+		  OUT struct udphdr *uh)
+{
+	/* TODO: how to recover 6rd/ds-lite packets */
+
+	uh->check =
+	    csum_part((uh->source) ^ 0xffff,
+		      htons(entry->ipv4_hnapt.sport), uh->check);
+	uh->check =
+	    csum_part((uh->dest) ^ 0xffff,
+		      htons(entry->ipv4_hnapt.dport), uh->check);
+	uh->check =
+	    csum_part(~(iph->saddr), htonl(entry->ipv4_hnapt.sip),
+		      uh->check);
+	uh->check =
+	    csum_part(~(iph->daddr), htonl(entry->ipv4_hnapt.dip),
+		      uh->check);
+	uh->source = htons(entry->ipv4_hnapt.sport);
+	uh->dest = htons(entry->ipv4_hnapt.dport);
+}
+
+/* Recover src/dst IP and recalculate the IP checksum */
+
+void foe_to_org_iphdr(IN struct foe_entry *entry, OUT struct iphdr *iph)
+{
+	/* TODO: how to recover 6rd/ds-lite packets */
+	iph->saddr = htonl(entry->ipv4_hnapt.sip);
+	iph->daddr = htonl(entry->ipv4_hnapt.dip);
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)(iph), iph->ihl);
+}
+
+/* Despite the generic name, this helper copies exactly ETH_ALEN (6)
+ * bytes and ignores 'n'; ether_addr_copy() also expects both buffers
+ * to be 16-bit aligned.
+ */
+void hwnat_memcpy(void *dest, void *src, u32 n)
+{
+	ether_addr_copy(dest, src);
+}
diff --git a/src/kernel/modules/netsys_driver/nat/hw_nat/util.h b/src/kernel/modules/netsys_driver/nat/hw_nat/util.h
new file mode 100644
index 0000000..1a0ba6f
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/hw_nat/util.h
@@ -0,0 +1,78 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual license.  When you use or
+ * distribute this software, you may choose to be licensed under
+ * version 2 of the GNU General Public License ("GPLv2 License")
+ * or BSD License.
+ *
+ * GPLv2 License
+ *
+ * Copyright(C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(C) 2017 MediaTek Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef _UTIL_WANTED
+#define _UTIL_WANTED
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "foe_fdb.h"
+#include "frame_engine.h"
+
+/*DEFINITIONS AND MACROS*/
+#define reg_read(phys)	(__raw_readl((void __iomem *)phys))
+#define reg_write(phys, val)	(__raw_writel(val, (void __iomem *)phys))
+
+/* EXPORT FUNCTION*/
+uint8_t *ip_to_str(uint32_t ip);
+void mac_reverse(uint8_t *mac);
+void reg_modify_bits(unsigned int *addr, uint32_t data, uint32_t offset, uint32_t len);
+void cal_ip_range(u32 start_ip, uint32_t end_ip, uint8_t *M, uint8_t *E);
+void foe_to_org_tcphdr(IN struct foe_entry *entry, IN struct iphdr *iph,
+		       OUT struct tcphdr *th);
+void foe_to_org_udphdr(IN struct foe_entry *entry, IN struct iphdr *iph,
+		       OUT struct udphdr *uh);
+void foe_to_org_iphdr(IN struct foe_entry *entry, OUT struct iphdr *iph);
+unsigned int str_to_ip(IN char *str);
+void hwnat_memcpy(void *dest, void *src, u32 n);
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/mii_mgr.h b/src/kernel/modules/netsys_driver/nat/include/mii_mgr.h
new file mode 100755
index 0000000..f8e0517
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/mii_mgr.h
@@ -0,0 +1,27 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef MII_MGR_H
+#define MII_MGR_H
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include "raether.h"
+
+extern struct net_device *dev_raether;
+
+#define PHY_CONTROL_0		0x0004
+#define MDIO_PHY_CONTROL_0	(RALINK_ETH_MAC_BASE + PHY_CONTROL_0)
+#define enable_mdio(x)
+
+#endif /* MII_MGR_H */
diff --git a/src/kernel/modules/netsys_driver/nat/include/ra_dbg_proc.h b/src/kernel/modules/netsys_driver/nat/include/ra_dbg_proc.h
new file mode 100755
index 0000000..38e6037
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/ra_dbg_proc.h
@@ -0,0 +1,96 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_DBG_PROC_H
+#define RA_DBG_PROC_H
+
+#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+#include "raeth_config.h"
+
+extern struct net_device *dev_raether;
+
+void dump_qos(void);
+void dump_reg(struct seq_file *s);
+void dump_cp0(void);
+
+int debug_proc_init(void);
+void debug_proc_exit(void);
+
+int tso_len_update(int tso_len);
+int num_of_txd_update(int num_of_txd);
+void rtk_hal_dump_mib(void);
+#ifdef CONFIG_RAETH_LRO
+int lro_stats_update(struct net_lro_mgr *lro_mgr, bool all_flushed);
+#endif
+extern unsigned int M2Q_table[64];
+extern struct QDMA_txdesc *free_head;
+extern struct SFQ_table *sfq0;
+extern struct SFQ_table *sfq1;
+extern struct SFQ_table *sfq2;
+extern struct SFQ_table *sfq3;
+extern int init_schedule;
+extern int working_schedule;
+struct raeth_int_t {
+	unsigned int RX_COHERENT_CNT;
+	unsigned int RX_DLY_INT_CNT;
+	unsigned int TX_COHERENT_CNT;
+	unsigned int TX_DLY_INT_CNT;
+	unsigned int RING3_RX_DLY_INT_CNT;
+	unsigned int RING2_RX_DLY_INT_CNT;
+	unsigned int RING1_RX_DLY_INT_CNT;
+	unsigned int RXD_ERROR_CNT;
+	unsigned int ALT_RPLC_INT3_CNT;
+	unsigned int ALT_RPLC_INT2_CNT;
+	unsigned int ALT_RPLC_INT1_CNT;
+	unsigned int RX_DONE_INT3_CNT;
+	unsigned int RX_DONE_INT2_CNT;
+	unsigned int RX_DONE_INT1_CNT;
+	unsigned int RX_DONE_INT0_CNT;
+	unsigned int TX_DONE_INT3_CNT;
+	unsigned int TX_DONE_INT2_CNT;
+	unsigned int TX_DONE_INT1_CNT;
+	unsigned int TX_DONE_INT0_CNT;
+};
+
+int int_stats_update(unsigned int int_status);
+
+#define DUMP_EACH_PORT(base)					\
+	for (i = 0; i < 7; i++) {				\
+		mii_mgr_read(31, (base) + (i * 0x100), &pkt_cnt); \
+		seq_printf(seq, "%8u ", pkt_cnt);		\
+	}
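+
+/* DUMP_EACH_PORT expects 'i' (loop index), 'pkt_cnt' (u32 scratch) and
+ * 'seq' (struct seq_file *) to be in scope at the expansion site; it
+ * reads one MIB counter per switch port 0-6 at (base) + port * 0x100
+ * through phy address 31 and prints the seven values on one line.
+ */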
+
+/* HW LRO functions */
+int hwlro_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void hwlro_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+
+int rss_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void rss_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+
+/* HW IO-Coherent functions */
+#ifdef	CONFIG_RAETH_HW_IOCOHERENT
+void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir);
+void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir);
+#else
+static inline void hwioc_debug_proc_init(struct proc_dir_entry *proc_reg_dir)
+{
+}
+
+static inline void hwioc_debug_proc_exit(struct proc_dir_entry *proc_reg_dir)
+{
+}
+#endif /* CONFIG_RAETH_HW_IOCOHERENT */
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/ra_ethtool.h b/src/kernel/modules/netsys_driver/nat/include/ra_ethtool.h
new file mode 100755
index 0000000..cff52e2
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/ra_ethtool.h
@@ -0,0 +1,34 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_ETHTOOL_H
+#define RA_ETHTOOL_H
+
+extern struct net_device *dev_raether;
+
+/* ethtool related */
+void ethtool_init(struct net_device *dev);
+int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+u32 et_get_link(struct net_device *dev);
+unsigned char get_current_phy_address(void);
+int mdio_read(struct net_device *dev, int phy_id, int location);
+void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+
+/* for pseudo interface */
+void ethtool_virt_init(struct net_device *dev);
+int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+u32 et_virt_get_link(struct net_device *dev);
+int mdio_virt_read(struct net_device *dev, int phy_id, int location);
+void mdio_virt_write(struct net_device *dev, int phy_id, int location,
+		     int value);
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/ra_ioctl.h b/src/kernel/modules/netsys_driver/nat/include/ra_ioctl.h
new file mode 100755
index 0000000..b94cb33
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/ra_ioctl.h
@@ -0,0 +1,179 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _RAETH_IOCTL_H
+#define _RAETH_IOCTL_H
+
+/* ioctl commands */
+#define RAETH_SW_IOCTL          0x89F0
+#define RAETH_ESW_REG_READ		0x89F1
+#define RAETH_ESW_REG_WRITE		0x89F2
+#define RAETH_MII_READ			0x89F3
+#define RAETH_MII_WRITE			0x89F4
+#define RAETH_ESW_INGRESS_RATE		0x89F5
+#define RAETH_ESW_EGRESS_RATE		0x89F6
+#define RAETH_ESW_PHY_DUMP		0x89F7
+#define RAETH_QDMA_IOCTL		0x89F8
+#define RAETH_EPHY_IOCTL		0x89F9
+#define RAETH_MII_READ_CL45             0x89FC
+#define RAETH_MII_WRITE_CL45            0x89FD
+#define RAETH_QDMA_SFQ_WEB_ENABLE       0x89FE
+#define RAETH_SET_LAN_IP		0x89FF
+
+/* switch ioctl commands */
+#define SW_IOCTL_SET_EGRESS_RATE        0x0000
+#define SW_IOCTL_SET_INGRESS_RATE       0x0001
+#define SW_IOCTL_SET_VLAN               0x0002
+#define SW_IOCTL_DUMP_VLAN              0x0003
+#define SW_IOCTL_DUMP_TABLE             0x0004
+#define SW_IOCTL_ADD_L2_ADDR            0x0005
+#define SW_IOCTL_DEL_L2_ADDR            0x0006
+#define SW_IOCTL_ADD_MCAST_ADDR         0x0007
+#define SW_IOCTL_DEL_MCAST_ADDR         0x0008
+#define SW_IOCTL_DUMP_MIB               0x0009
+#define SW_IOCTL_ENABLE_IGMPSNOOP       0x000A
+#define SW_IOCTL_DISABLE_IGMPSNOOP      0x000B
+#define SW_IOCTL_SET_PORT_TRUNK         0x000C
+#define SW_IOCTL_GET_PORT_TRUNK         0x000D
+#define SW_IOCTL_SET_PORT_MIRROR        0x000E
+#define SW_IOCTL_GET_PHY_STATUS         0x000F
+#define SW_IOCTL_READ_REG               0x0010
+#define SW_IOCTL_WRITE_REG              0x0011
+#define SW_IOCTL_QOS_EN                 0x0012
+#define SW_IOCTL_QOS_SET_TABLE2TYPE     0x0013
+#define SW_IOCTL_QOS_GET_TABLE2TYPE     0x0014
+#define SW_IOCTL_QOS_SET_PORT2TABLE     0x0015
+#define SW_IOCTL_QOS_GET_PORT2TABLE     0x0016
+#define SW_IOCTL_QOS_SET_PORT2PRI       0x0017
+#define SW_IOCTL_QOS_GET_PORT2PRI       0x0018
+#define SW_IOCTL_QOS_SET_DSCP2PRI       0x0019
+#define SW_IOCTL_QOS_GET_DSCP2PRI       0x001a
+#define SW_IOCTL_QOS_SET_PRI2QUEUE      0x001b
+#define SW_IOCTL_QOS_GET_PRI2QUEUE      0x001c
+#define SW_IOCTL_QOS_SET_QUEUE_WEIGHT   0x001d
+#define SW_IOCTL_QOS_GET_QUEUE_WEIGHT   0x001e
+#define SW_IOCTL_SET_PHY_TEST_MODE      0x001f
+#define SW_IOCTL_GET_PHY_REG            0x0020
+#define SW_IOCTL_SET_PHY_REG            0x0021
+#define SW_IOCTL_VLAN_TAG               0x0022
+#define SW_IOCTL_CLEAR_TABLE            0x0023
+#define SW_IOCTL_CLEAR_VLAN             0x0024
+#define SW_IOCTL_SET_VLAN_MODE          0x0025
+
+/*****************QDMA IOCTL DATA*************/
+#define RAETH_QDMA_REG_READ		0x0000
+#define RAETH_QDMA_REG_WRITE		0x0001
+#define RAETH_QDMA_QUEUE_MAPPING        0x0002
+#define RAETH_QDMA_READ_CPU_CLK         0x0003
+/*********************************************/
+/******************EPHY IOCTL DATA************/
+/*MT7622 10/100 phy cal*/
+#define RAETH_VBG_IEXT_CALIBRATION	0x0000
+#define RAETH_TXG_R50_CALIBRATION	0x0001
+#define RAETH_TXG_OFFSET_CALIBRATION	0x0002
+#define RAETH_TXG_AMP_CALIBRATION	0x0003
+#define GE_TXG_R50_CALIBRATION		0x0004
+#define GE_TXG_OFFSET_CALIBRATION	0x0005
+#define GE_TXG_AMP_CALIBRATION		0x0006
+/*********************************************/
+#define REG_ESW_WT_MAC_MFC              0x10
+#define REG_ESW_ISC                     0x18
+#define REG_ESW_WT_MAC_ATA1             0x74
+#define REG_ESW_WT_MAC_ATA2             0x78
+#define REG_ESW_WT_MAC_ATWD             0x7C
+#define REG_ESW_WT_MAC_ATC              0x80
+
+#define REG_ESW_TABLE_TSRA1		0x84
+#define REG_ESW_TABLE_TSRA2		0x88
+#define REG_ESW_TABLE_ATRD		0x8C
+
+#define REG_ESW_VLAN_VTCR		0x90
+#define REG_ESW_VLAN_VAWD1		0x94
+#define REG_ESW_VLAN_VAWD2		0x98
+
+#if defined(CONFIG_MACH_MT7623)
+#define REG_ESW_VLAN_ID_BASE		0x100
+#else
+#define REG_ESW_VLAN_ID_BASE          0x50
+#endif
+#define REG_ESW_VLAN_MEMB_BASE		0x70
+#define REG_ESW_TABLE_SEARCH		0x24
+#define REG_ESW_TABLE_STATUS0		0x28
+#define REG_ESW_TABLE_STATUS1		0x2C
+#define REG_ESW_TABLE_STATUS2		0x30
+#define REG_ESW_WT_MAC_AD0		0x34
+#define REG_ESW_WT_MAC_AD1		0x38
+#define REG_ESW_WT_MAC_AD2		0x3C
+
+#if defined(CONFIG_MACH_MT7623)
+#define REG_ESW_MAX         0xFC
+#else
+#define REG_ESW_MAX			0x16C
+#endif
+#define REG_HQOS_MAX			0x3FFF
+
+struct esw_reg {
+	unsigned int off;
+	unsigned int val;
+};
+
+struct ra_mii_ioctl_data {
+	__u32 phy_id;
+	__u32 reg_num;
+	__u32 val_in;
+	__u32 val_out;
+	__u32 port_num;
+	__u32 dev_addr;
+	__u32 reg_addr;
+};
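+
+/* Illustrative userspace sketch (not part of the driver): the RAETH_*
+ * command numbers sit in the SIOCDEVPRIVATE range (0x89F0+), so a
+ * caller is assumed to hand the request struct to the driver through
+ * ifr_data of a struct ifreq. The device name "eth0" and the ifr_data
+ * convention are assumptions here, not guarantees made by this header.
+ *
+ *	struct ra_mii_ioctl_data mii = { .phy_id = 0, .reg_num = MII_BMSR };
+ *	struct ifreq ifr;
+ *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&mii;
+ *	if (ioctl(fd, RAETH_MII_READ, &ifr) == 0)
+ *		printf("BMSR = 0x%04x\n", mii.val_out);
+ *	close(fd);
+ */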
+
+struct ra_switch_ioctl_data {
+	unsigned int cmd;
+	unsigned int on_off;
+	unsigned int port;
+	unsigned int bw;
+	unsigned int vid;
+	unsigned int fid;
+	unsigned int port_map;
+	unsigned int rx_port_map;
+	unsigned int tx_port_map;
+	unsigned int igmp_query_interval;
+	unsigned int reg_addr;
+	unsigned int reg_val;
+	unsigned int mode;
+	unsigned int qos_queue_num;
+	unsigned int qos_type;
+	unsigned int qos_pri;
+	unsigned int qos_dscp;
+	unsigned int qos_table_idx;
+	unsigned int qos_weight;
+	unsigned char mac[6];
+};
+
+struct qdma_ioctl_data {
+	unsigned int cmd;
+	unsigned int off;
+	unsigned int val;
+};
+
+struct ephy_ioctl_data {
+	unsigned int cmd;
+};
+
+struct esw_rate {
+	unsigned int on_off;
+	unsigned int port;
+	unsigned int bw;	/*Mbps */
+};
+#endif	/* _RAETH_IOCTL_H */
diff --git a/src/kernel/modules/netsys_driver/nat/include/ra_mac.h b/src/kernel/modules/netsys_driver/nat/include/ra_mac.h
new file mode 100755
index 0000000..c329703
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/ra_mac.h
@@ -0,0 +1,30 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_MAC_H
+#define RA_MAC_H
+
+void ra2880stop(struct END_DEVICE *ei_local);
+void set_mac_address(unsigned char p[6]);
+void set_mac2_address(unsigned char p[6]);
+int str_to_ip(unsigned int *ip, const char *str);
+void enable_auto_negotiate(struct END_DEVICE *ei_local);
+void set_ge1_force_1000(void);
+void set_ge2_force_1000(void);
+void set_ge1_an(void);
+void set_ge2_an(void);
+void set_ge2_gmii(void);
+void set_ge0_gmii(void);
+void set_ge2_force_link_down(void);
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/ra_switch.h b/src/kernel/modules/netsys_driver/nat/include/ra_switch.h
new file mode 100755
index 0000000..201a697
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/ra_switch.h
@@ -0,0 +1,101 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_SWITCH_H
+#define RA_SWITCH_H
+
+#include  "./rtl8367c/include/rtk_switch.h"
+#include  "./rtl8367c/include/rtk_hal.h"
+#include  "./rtl8367c/include/port.h"
+#include  "./rtl8367c/include/vlan.h"
+#include  "./rtl8367c/include/rtl8367c_asicdrv_port.h"
+
+extern struct net_device *dev_raether;
+#define ANACAL_INIT		0x01
+#define ANACAL_ERROR		0xFD
+#define ANACAL_SATURATION	0xFE
+#define	ANACAL_FINISH		0xFF
+#define ANACAL_PAIR_A		0
+#define ANACAL_PAIR_B		1
+#define ANACAL_PAIR_C		2
+#define ANACAL_PAIR_D		3
+#define DAC_IN_0V		0x000
+#define DAC_IN_2V		0x0f0
+#define TX_AMP_OFFSET_0MV	0x20
+#define TX_AMP_OFFSET_VALID_BITS	6
+#define FE_CAL_P0			0
+#define FE_CAL_P1			1
+#if defined(CONFIG_MACH_LEOPARD)
+#define FE_CAL_COMMON			1
+#else
+#define FE_CAL_COMMON			0
+#endif
+
+void fe_sw_init(void);
+void fe_sw_preinit(struct END_DEVICE *ei_local);
+void fe_sw_deinit(struct END_DEVICE *ei_local);
+void sw_ioctl(struct ra_switch_ioctl_data *ioctl_data);
+irqreturn_t esw_interrupt(int irq, void *resv);
+irqreturn_t gsw_interrupt(int irq, void *resv);
+
+/* struct mtk_gsw -	the structure that holds the SoC specific data
+ * @dev:		The Device struct
+ * @base:		The base address
+ * @piac_offset:	The PIAC base may change depending on SoC
+ * @irq:		The IRQ we are using
+ * @port4:		The port4 mode on MT7620
+ * @autopoll:		Is MDIO autopolling enabled
+ * @ethsys:		The ethsys register map
+ * @pctl:		The pin control register map
+ * @clk_trgpll:		The trgmii pll clock
+ */
+struct mtk_gsw {
+	struct mtk_eth		*eth;
+	struct device		*dev;
+	void __iomem		*base;
+	u32			piac_offset;
+	int			irq;
+	int			port4;
+	unsigned long int	autopoll;
+
+	struct regmap		*ethsys;
+	struct regmap		*pctl;
+
+	int			trgmii_force;
+	bool			wllll;
+	bool			mcm;
+	struct pinctrl *pins;
+	struct pinctrl_state *ps_default;
+	struct pinctrl_state *ps_reset;
+	int reset_pin;
+	struct regulator *supply;
+	struct regulator *b3v;
+};
+
+extern u8 fe_cal_flag;
+extern u8 fe_cal_flag_mdix;
+extern u8 fe_cal_tx_offset_flag;
+extern u8 fe_cal_tx_offset_flag_mdix;
+extern u8 fe_cal_r50_flag;
+extern u8 fe_cal_vbg_flag;
+void fe_cal_r50(u8 port_num, u32 delay);
+void fe_cal_tx_amp(u8 port_num, u32 delay);
+void fe_cal_tx_amp_mdix(u8 port_num, u32 delay);
+void fe_cal_tx_offset(u8 port_num, u32 delay);
+void fe_cal_tx_offset_mdix(u8 port_num, u32 delay);
+void fe_cal_vbg(u8 port_num, u32 delay);
+/*giga port calibration*/
+void ge_cal_r50(u8 port_num, u32 delay);
+void ge_cal_tx_amp(u8 port_num, u32 delay);
+void ge_cal_tx_offset(u8 port_num, u32 delay);
+void do_ge_phy_all_analog_cal(u8 phyaddr);
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/raeth_config.h b/src/kernel/modules/netsys_driver/nat/include/raeth_config.h
new file mode 100755
index 0000000..d9eed66
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/raeth_config.h
@@ -0,0 +1,328 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETH_CONFIG_H
+#define RAETH_CONFIG_H
+
+/* compile flag for features */
+#define DELAY_INT
+
+#define CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+/*#define CONFIG_QDMA_QOS_WEB*/
+#define CONFIG_QDMA_QOS_MARK
+
+#if !defined(CONFIG_SOC_MT7621)
+#define CONFIG_RAETH_NAPI
+#define CONFIG_RAETH_TX_RX_INT_SEPARATION
+/*#define CONFIG_RAETH_NAPI_TX_RX*/
+#define CONFIG_RAETH_NAPI_RX_ONLY
+#endif
+
+#if defined(CONFIG_SOC_MT7621)
+#define CONFIG_GE1_RGMII_FORCE_1000
+#define CONFIG_GE1_RGMII_FORCE_1200
+#define CONFIG_RA_NETWORK_TASKLET_BH
+#endif
+/*CONFIG_RA_NETWORK_TASKLET_BH*/
+/*CONFIG_RA_NETWORK_WORKQUEUE_BH*/
+/*CONFIG_RAETH_SPECIAL_TAG*/
+#define CONFIG_RAETH_CHECKSUM_OFFLOAD
+#if !defined(CONFIG_SOC_MT7621)
+#define CONFIG_RAETH_HW_LRO
+#endif
+/* #define CONFIG_RAETH_HW_LRO_FORCE */
+/* #define CONFIG_RAETH_HW_LRO_DVT */
+#define CONFIG_RAETH_HW_VLAN_TX
+/*CONFIG_RAETH_HW_VLAN_RX*/
+#define CONFIG_RAETH_TSO
+/*#define CONFIG_RAETH_ETHTOOL*/
+#define CONFIG_RAETH_QDMA
+/*CONFIG_RAETH_QDMATX_QDMARX*/
+/*CONFIG_HW_SFQ*/
+#define CONFIG_RAETH_HW_IOCOHERENT
+#define	CONFIG_RAETH_GMAC2
+/*#define CONFIG_RAETH_RSS_4RING*/
+/*#define CONFIG_RAETH_RSS_2RING*/
+/* definitions */
+#ifdef	DELAY_INT
+#define FE_DLY_INT	BIT(0)
+#else
+#define FE_DLY_INT	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_LRO
+#define FE_HW_LRO	BIT(1)
+#else
+#define FE_HW_LRO	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_LRO_FORCE
+#define FE_HW_LRO_FPORT	BIT(2)
+#else
+#define FE_HW_LRO_FPORT	(0)
+#endif
+#ifdef	CONFIG_RAETH_LRO
+#define FE_SW_LRO	BIT(3)
+#else
+#define FE_SW_LRO	(0)
+#endif
+#ifdef	CONFIG_RAETH_QDMA
+#define FE_QDMA		BIT(4)
+#else
+#define FE_QDMA		(0)
+#endif
+#ifdef	CONFIG_RAETH_NAPI
+#define FE_INT_NAPI	BIT(5)
+#else
+#define FE_INT_NAPI	(0)
+#endif
+#ifdef	CONFIG_RA_NETWORK_WORKQUEUE_BH
+#define FE_INT_WORKQ	BIT(6)
+#else
+#define FE_INT_WORKQ	(0)
+#endif
+#ifdef	CONFIG_RA_NETWORK_TASKLET_BH
+#define FE_INT_TASKLET	BIT(7)
+#else
+#define FE_INT_TASKLET	(0)
+#endif
+#ifdef	CONFIG_RAETH_TX_RX_INT_SEPARATION
+#define FE_IRQ_SEPARATE	BIT(8)
+#else
+#define FE_IRQ_SEPARATE	(0)
+#endif
+#define FE_GE2_SUPPORT	BIT(9)
+#ifdef	CONFIG_RAETH_ETHTOOL
+#define FE_ETHTOOL	BIT(10)
+#else
+#define FE_ETHTOOL	(0)
+#endif
+#ifdef	CONFIG_RAETH_CHECKSUM_OFFLOAD
+#define FE_CSUM_OFFLOAD	BIT(11)
+#else
+#define FE_CSUM_OFFLOAD	(0)
+#endif
+#ifdef	CONFIG_RAETH_TSO
+#define FE_TSO		BIT(12)
+#else
+#define FE_TSO		(0)
+#endif
+#ifdef	CONFIG_RAETH_TSOV6
+#define FE_TSO_V6	BIT(13)
+#else
+#define FE_TSO_V6	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_VLAN_TX
+#define FE_HW_VLAN_TX	BIT(14)
+#else
+#define FE_HW_VLAN_TX	(0)
+#endif
+#ifdef	CONFIG_RAETH_HW_VLAN_RX
+#define FE_HW_VLAN_RX	BIT(15)
+#else
+#define FE_HW_VLAN_RX	(0)
+#endif
+#ifdef	CONFIG_RAETH_QDMA
+#define FE_QDMA_TX	BIT(16)
+#else
+#define FE_QDMA_TX	(0)
+#endif
+#ifdef	CONFIG_RAETH_QDMATX_QDMARX
+#define FE_QDMA_RX	BIT(17)
+#else
+#define FE_QDMA_RX	(0)
+#endif
+#ifdef	CONFIG_HW_SFQ
+#define FE_HW_SFQ	BIT(18)
+#else
+#define FE_HW_SFQ	(0)
+#endif
+#define FE_HW_IOCOHERENT BIT(19)
+
+#ifdef	CONFIG_MTK_FPGA
+#define FE_FPGA_MODE	BIT(20)
+#else
+#define FE_FPGA_MODE	(0)
+#endif
+
+/* Note: FE_RSS_4RING reuses BIT(20) (FE_FPGA_MODE) and FE_RSS_2RING
+ * reuses BIT(2) (FE_HW_LRO_FPORT). Both RSS configs are commented out
+ * above, so the overlap is latent, but these features must not be
+ * enabled together with their bit-sharing counterparts.
+ */
+#ifdef CONFIG_RAETH_RSS_4RING
+#define FE_RSS_4RING	BIT(20)
+#else
+#define FE_RSS_4RING	(0)
+#endif
+
+#ifdef CONFIG_RAETH_RSS_2RING
+#define FE_RSS_2RING	BIT(2)
+#else
+#define FE_RSS_2RING	(0)
+#endif
+
+#ifdef	CONFIG_RAETH_HW_LRO_REASON_DBG
+#define FE_HW_LRO_DBG	BIT(21)
+#else
+#define FE_HW_LRO_DBG	(0)
+#endif
+#ifdef CONFIG_RAETH_INT_DBG
+#define FE_RAETH_INT_DBG	BIT(22)
+#else
+#define FE_RAETH_INT_DBG	(0)
+#endif
+#ifdef CONFIG_USER_SNMPD
+#define USER_SNMPD	BIT(23)
+#else
+#define USER_SNMPD	(0)
+#endif
+#ifdef CONFIG_TASKLET_WORKQUEUE_SW
+#define TASKLET_WORKQUEUE_SW	BIT(24)
+#else
+#define TASKLET_WORKQUEUE_SW	(0)
+#endif
+#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
+#define FE_HW_NAT	BIT(25)
+#else
+#define FE_HW_NAT	(0)
+#endif
+#ifdef	CONFIG_RAETH_NAPI_TX_RX
+#define FE_INT_NAPI_TX_RX	BIT(26)
+#else
+#define FE_INT_NAPI_TX_RX	(0)
+#endif
+#ifdef	CONFIG_QDMA_MQ
+#define QDMA_MQ       BIT(27)
+#else
+#define QDMA_MQ       (0)
+#endif
+#ifdef	CONFIG_RAETH_NAPI_RX_ONLY
+#define FE_INT_NAPI_RX_ONLY	BIT(28)
+#else
+#define FE_INT_NAPI_RX_ONLY	(0)
+#endif
+#ifdef	CONFIG_QDMA_SUPPORT_QOS
+#define FE_QDMA_FQOS	BIT(29)
+#else
+#define FE_QDMA_FQOS	(0)
+#endif
+
+#ifdef	CONFIG_QDMA_QOS_WEB
+#define QDMA_QOS_WEB	BIT(30)
+#else
+#define QDMA_QOS_WEB	(0)
+#endif
+
+#ifdef	CONFIG_QDMA_QOS_MARK
+#define QDMA_QOS_MARK	BIT(31)
+#else
+#define QDMA_QOS_MARK	(0)
+#endif
+
+#define MT7626_FE	(7626)
+#define MT7623_FE	(7623)
+#define MT7622_FE	(7622)
+#define MT7621_FE	(7621)
+#define LEOPARD_FE		(1985)
+
+#define GMAC2 BIT(0)
+#define LAN_WAN_SUPPORT BIT(1)
+#define WAN_AT_P0 BIT(2)
+#define WAN_AT_P4 BIT(3)
+#if defined(CONFIG_GE1_RGMII_FORCE_1000)
+#define    GE1_RGMII_FORCE_1000		BIT(4)
+#define    GE1_TRGMII_FORCE_2000	(0)
+#define    GE1_TRGMII_FORCE_2600	(0)
+#define    MT7530_TRGMII_PLL_25M	(0x0A00)
+#define    MT7530_TRGMII_PLL_40M	(0x0640)
+#elif defined(CONFIG_GE1_TRGMII_FORCE_2000)
+#define    GE1_TRGMII_FORCE_2000	BIT(5)
+#define    GE1_RGMII_FORCE_1000		(0)
+#define    GE1_TRGMII_FORCE_2600	(0)
+#define    MT7530_TRGMII_PLL_25M	(0x1400)
+#define    MT7530_TRGMII_PLL_40M	(0x0C80)
+#elif defined(CONFIG_GE1_TRGMII_FORCE_2600)
+#define    GE1_TRGMII_FORCE_2600	BIT(6)
+#define    GE1_RGMII_FORCE_1000		(0)
+#define    GE1_TRGMII_FORCE_2000	(0)
+#define    MT7530_TRGMII_PLL_25M	(0x1A00)
+#define    MT7530_TRGMII_PLL_40M	(0x1040)
+#define    TRGMII
+#else
+#define    GE1_RGMII_FORCE_1000		(0)
+#define    GE1_TRGMII_FORCE_2000	(0)
+#define    GE1_TRGMII_FORCE_2600	(0)
+#define    MT7530_TRGMII_PLL_25M	(0)
+#define    MT7530_TRGMII_PLL_40M	(0)
+#endif
+
+#define    GE1_RGMII_AN    BIT(7)
+#define    GE1_SGMII_AN    BIT(8)
+#define    GE1_SGMII_FORCE_2500    BIT(9)
+#define    GE1_RGMII_ONE_EPHY    BIT(10)
+#define    RAETH_ESW    BIT(11)
+#define    GE1_RGMII_NONE    BIT(12)
+#define    GE2_RGMII_FORCE_1000    BIT(13)
+#define    GE2_RGMII_AN    BIT(14)
+#define    GE2_INTERNAL_GPHY    BIT(15)
+#define    GE2_SGMII_AN    BIT(16)
+#define    GE2_SGMII_FORCE_2500    BIT(17)
+#define    MT7622_EPHY    BIT(18)
+#define    RAETH_SGMII	BIT(19)
+#define    GE2_RAETH_SGMII	BIT(20)
+#define    LEOPARD_EPHY	BIT(21)
+#define    SGMII_SWITCH	BIT(22)
+#define    LEOPARD_EPHY_GMII BIT(23)
+/* #ifndef CONFIG_MAC_TO_GIGAPHY_MODE_ADDR */
+/* #define CONFIG_MAC_TO_GIGAPHY_MODE_ADDR (0) */
+/* #endif */
+/* #ifndef CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 */
+/* #define CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 (0) */
+/* #endif */
+
+/* macros */
+#define fe_features_config(end_device)			\
+do {							\
+	(end_device)->features = 0;			\
+	(end_device)->features |= FE_DLY_INT;		\
+	(end_device)->features |= FE_HW_LRO;		\
+	(end_device)->features |= FE_HW_LRO_FPORT;	\
+	(end_device)->features |= FE_HW_LRO_DBG;	\
+	(end_device)->features |= FE_SW_LRO;		\
+	(end_device)->features |= FE_QDMA;		\
+	(end_device)->features |= FE_INT_NAPI;		\
+	(end_device)->features |= FE_INT_WORKQ;		\
+	(end_device)->features |= FE_INT_TASKLET;	\
+	(end_device)->features |= FE_IRQ_SEPARATE;	\
+	(end_device)->features |= FE_ETHTOOL;		\
+	(end_device)->features |= FE_CSUM_OFFLOAD;	\
+	(end_device)->features |= FE_TSO;		\
+	(end_device)->features |= FE_TSO_V6;		\
+	(end_device)->features |= FE_HW_VLAN_TX;	\
+	(end_device)->features |= FE_HW_VLAN_RX;	\
+	(end_device)->features |= FE_QDMA_TX;		\
+	(end_device)->features |= FE_QDMA_RX;		\
+	(end_device)->features |= FE_HW_SFQ;		\
+	(end_device)->features |= FE_FPGA_MODE;		\
+	(end_device)->features |= FE_HW_NAT;		\
+	(end_device)->features |= FE_INT_NAPI_TX_RX;	\
+	(end_device)->features |= FE_INT_NAPI_RX_ONLY;	\
+	(end_device)->features |= FE_QDMA_FQOS;		\
+	(end_device)->features |= QDMA_QOS_WEB;		\
+	(end_device)->features |= QDMA_QOS_MARK;	\
+	(end_device)->features |= FE_RSS_4RING;		\
+	(end_device)->features |= FE_RSS_2RING;		\
+} while (0)
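+
+/* Each FE_* flag above expands to BIT(n) when its option is defined and
+ * to (0) otherwise, so fe_features_config() collapses at compile time
+ * into a constant OR-mask; disabled features contribute nothing. For
+ * example, DELAY_INT and CONFIG_RAETH_QDMA (both defined above) add
+ * FE_DLY_INT | FE_QDMA | FE_QDMA_TX == BIT(0) | BIT(4) | BIT(16).
+ */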
+
+#define fe_architecture_config(end_device)			\
+do {								\
+	(end_device)->architecture = 0;				\
+	(end_device)->architecture |= GE1_TRGMII_FORCE_2000;	\
+	(end_device)->architecture |= GE1_TRGMII_FORCE_2600;	\
+} while (0)
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/raeth_reg.h b/src/kernel/modules/netsys_driver/nat/include/raeth_reg.h
new file mode 100755
index 0000000..8078ccf
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/raeth_reg.h
@@ -0,0 +1,1352 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETH_REG_H
+#define RAETH_REG_H
+
+#include <linux/mii.h>		/* for struct mii_if_info in ra2882ethreg.h */
+#include <linux/version.h>	/* check linux version */
+#include <linux/interrupt.h>	/* for "struct tasklet_struct" */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/workqueue.h>
+#include <linux/inet_lro.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include "raether.h"
+
+#define MAX_PACKET_SIZE	1514
+#define	MIN_PACKET_SIZE 60
+#if defined(CONFIG_MACH_MT7623) || defined(CONFIG_SOC_MT7621)
+#define MAX_PTXD_LEN 0x3fff	/* 16k */
+#define MAX_QTXD_LEN 0x3fff	/* 16k */
+#else
+#define MAX_PTXD_LEN 0x3fff	/* 16k */
+#define MAX_QTXD_LEN 0xffff
+#endif
+
+#define phys_to_bus(a) (a)
+
+extern void __iomem *ethdma_sysctl_base;
+extern void __iomem *ethdma_frame_engine_base;
+
+/* Bit-range mask: BITS(m, n) sets bits m..n inclusive.
+ * Example: BITS(16, 23) = 0x00FF0000
+ *   ~(BIT(m) - 1)         = 0xFFFF0000
+ *   (BIT(n) - 1) | BIT(n) = 0x00FFFFFF
+ */
+#define BITS(m, n)   (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
+
+#define ETHER_ADDR_LEN  6
+
+/*  Phy Vender ID list */
+
+#define EV_ICPLUS_PHY_ID0 0x0243
+#define EV_ICPLUS_PHY_ID1 0x0D90
+#define EV_MARVELL_PHY_ID0 0x0141
+#define EV_MARVELL_PHY_ID1 0x0CC2
+#define EV_VTSS_PHY_ID0 0x0007
+#define EV_VTSS_PHY_ID1 0x0421
+
+#define ETHSYS_BASE 0x1b000000
+#define SGMII_CONFIG_0	BIT(9) /* SGMII path enable of GMAC1 */
+#define SGMII_CONFIG_1	BIT(8) /* SGMII path enable of GMAC2 */
+
+#if defined(CONFIG_PINCTRL_MT7622)
+#define SGMII_REG_BASE0	(0x1b128000)
+#define SGMII_REG_PHYA_BASE0	(0x1b12a000)
+#define SGMII_REG_BASE1	(0)
+#define SGMII_REG_PHYA_BASE1	(0)
+#elif defined(CONFIG_MACH_LEOPARD)
+#define SGMII_REG_BASE0	(0x1b128000)
+#define SGMII_REG_PHYA_BASE0	(0x1b128100)
+#define SGMII_REG_BASE1	(0x1b130000)
+#define SGMII_REG_PHYA_BASE1	(0x1b130100)
+#else
+#define SGMII_REG_BASE0	(0)
+#define SGMII_REG_PHYA_BASE0	(0)
+#define SGMII_REG_BASE1	(0)
+#define SGMII_REG_PHYA_BASE1	(0)
+#endif
+#define ETHSYS_MAC_BASE	(0x1b110000)
+
+#if defined(CONFIG_MACH_LEOPARD)
+#define FE_RSTCTL   0x1B000034
+#define INFRA_BASE  0x1000070C
+#define GEPHY_CTRL0 0x10000710
+#define GPIO_GO_BASE GEPHY_CTRL0
+#define GPIO_MODE_BASE 0x10217300
+#else
+#define INFRA_BASE  0
+#define FE_RSTCTL   0
+#define GPIO_GO_BASE 0x10211800
+#define GPIO_MODE_BASE 0x10211300
+#endif
+
+/* ETHDMASYS base address
+ * for I2S/PCM/GDMA/HSDMA/FE/GMAC
+ */
+#define ETHDMASYS_BASE			ethdma_sysctl_base
+#define ETHDMASYS_FRAME_ENGINE_BASE	ethdma_frame_engine_base
+
+#define ETHDMASYS_SYSCTL_BASE            ETHDMASYS_BASE
+#define ETHDMASYS_PPE_BASE		(ETHDMASYS_FRAME_ENGINE_BASE + 0x0C00)
+#define ETHDMASYS_ETH_MAC_BASE		(ETHDMASYS_FRAME_ENGINE_BASE + 0x10000)
+#if defined(CONFIG_MACH_MT7623) || defined(CONFIG_SOC_MT7621)
+#define ETHDMASYS_ETH_SW_BASE       (ETHDMASYS_FRAME_ENGINE_BASE + 0x10000)
+#else
+#define ETHDMASYS_ETH_SW_BASE		(ETHDMASYS_FRAME_ENGINE_BASE + 0x18000)
+#endif
+
+#define RALINK_FRAME_ENGINE_BASE	ETHDMASYS_FRAME_ENGINE_BASE
+#define RALINK_PPE_BASE                 ETHDMASYS_PPE_BASE
+#define RALINK_SYSCTL_BASE		ETHDMASYS_SYSCTL_BASE
+#define RALINK_ETH_MAC_BASE		ETHDMASYS_ETH_MAC_BASE
+#define RALINK_ETH_SW_BASE		ETHDMASYS_ETH_SW_BASE
+
+#define RSTCTL_FE_RST			BIT(6)
+#define RALINK_FE_RST			RSTCTL_FE_RST
+
+#define RSTCTL_ETH_RST			BIT(23)
+#define RALINK_ETH_RST			RSTCTL_ETH_RST
+
+/* FE_INT_STATUS */
+#define RX_COHERENT      BIT(31)
+#define RX_DLY_INT       BIT(30)
+#define TX_COHERENT      BIT(29)
+#define TX_DLY_INT       BIT(28)
+#define RING3_RX_DLY_INT    BIT(27)
+#define RING2_RX_DLY_INT    BIT(26)
+#define RING1_RX_DLY_INT    BIT(25)
+#define RING0_RX_DLY_INT    BIT(30)	/* shares the RX_DLY_INT bit */
+
+#define RSS_RX_INT0	 (RX_DONE_INT0 | RX_DONE_INT1 | \
+			  RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+
+#define RSS_RX_RING0	 (RX_DONE_INT0 | RING0_RX_DLY_INT)
+#define RSS_RX_RING1	 (RX_DONE_INT1 | RING1_RX_DLY_INT)
+#define RSS_RX_RING2	 (RX_DONE_INT2 | RING2_RX_DLY_INT)
+#define RSS_RX_RING3	 (RX_DONE_INT3 | RING3_RX_DLY_INT)
+
+#define RSS_RX_INT1	 (RX_DONE_INT2 | RX_DONE_INT3 | \
+			  RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RSS_RX_DLY_INT0	(RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+#define RSS_RX_DLY_INT1	(RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RSS_RX_DLY_INT	 (RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+			  RING2_RX_DLY_INT | RING3_RX_DLY_INT)
+
+#define RXD_ERROR	 BIT(24)
+#define ALT_RPLC_INT3    BIT(23)
+#define ALT_RPLC_INT2    BIT(22)
+#define ALT_RPLC_INT1    BIT(21)
+
+#define RX_DONE_INT3     BIT(19)
+#define RX_DONE_INT2     BIT(18)
+#define RX_DONE_INT1     BIT(17)
+#define RX_DONE_INT0     BIT(16)
+
+#define TX_DONE_INT3     BIT(3)
+#define TX_DONE_INT2     BIT(2)
+#define TX_DONE_INT1     BIT(1)
+#define TX_DONE_INT0     BIT(0)
+
+#define RLS_COHERENT     BIT(29)
+#define RLS_DLY_INT      BIT(28)
+#define RLS_DONE_INT     BIT(0)
+
+#define FE_INT_ALL		(TX_DONE_INT3 | TX_DONE_INT2 | \
+				 TX_DONE_INT1 | TX_DONE_INT0 | \
+				 RX_DONE_INT0 | RX_DONE_INT1 | \
+				 RX_DONE_INT2 | RX_DONE_INT3)
+
+#define QFE_INT_ALL		(RLS_DONE_INT | RX_DONE_INT0 | \
+				 RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3)
+#define QFE_INT_DLY_INIT	(RLS_DLY_INT | RX_DLY_INT)
+#define RX_INT_ALL		(RX_DONE_INT0 | RX_DONE_INT1 | \
+				 RX_DONE_INT2 | RX_DONE_INT3 | \
+				 RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+				 RING2_RX_DLY_INT | RING3_RX_DLY_INT | RX_DLY_INT)
+#define TX_INT_ALL		(TX_DONE_INT0 | TX_DLY_INT)
+
+#define NUM_QDMA_PAGE	    512
+#define QDMA_PAGE_SIZE      2048
+
+/* SW_INT_STATUS */
+#define ESW_PHY_POLLING		(RALINK_ETH_MAC_BASE + 0x0000)
+#define MAC1_WOL		(RALINK_ETH_SW_BASE + 0x0110)
+#define WOL_INT_CLR		BIT(17)
+#define WOL_INT_EN		BIT(1)
+#define WOL_EN			BIT(0)
+
+#define P5_LINK_CH		BIT(5)
+#define P4_LINK_CH		BIT(4)
+#define P3_LINK_CH		BIT(3)
+#define P2_LINK_CH		BIT(2)
+#define P1_LINK_CH		BIT(1)
+#define P0_LINK_CH		BIT(0)
+
+#define RX_BUF_ALLOC_SIZE	2000
+#define FASTPATH_HEADROOM	64
+
+#define ETHER_BUFFER_ALIGN	32	/* align on a cache line */
+
+#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \
+	((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \
+	~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr))
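+
+/* Example: for addr = 0x1004 the macro yields ((0x1004 + 31) & ~31) -
+ * 0x1004 = 0x1020 - 0x1004 = 28, i.e. the padding needed to reach the
+ * next 32-byte cache-line boundary.
+ */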
+
+struct PSEUDO_ADAPTER {
+	struct net_device *raeth_dev;
+	struct net_device *pseudo_dev;
+	struct net_device_stats stat;
+	struct mii_if_info mii_info;
+};
+
+#define MAX_PSEUDO_ENTRY               1
+
+/* Register Categories Definition */
+#define RAFRAMEENGINE_OFFSET	0x0000
+#define RAGDMA_OFFSET		0x0020
+#define RAPSE_OFFSET		0x0040
+#define RAGDMA2_OFFSET		0x0060
+#define RACDMA_OFFSET		0x0080
+#define RAPDMA_OFFSET		0x0800
+#define SDM_OFFSET		0x0C00
+#define RAPPE_OFFSET		0x0200
+#define RACMTABLE_OFFSET	0x0400
+#define RAPOLICYTABLE_OFFSET	0x1000
+
+/* Register Map Detail */
+/* RT3883 */
+#define SYSCFG1			(RALINK_SYSCTL_BASE + 0x14)
+
+#define	FE_PSE_FREE		(RALINK_FRAME_ENGINE_BASE + 0x240)
+#define FE_DROP_FQ		(RALINK_FRAME_ENGINE_BASE + 0x244)
+#define FE_DROP_FC		(RALINK_FRAME_ENGINE_BASE + 0x248)
+#define FE_DROP_PPE		(RALINK_FRAME_ENGINE_BASE + 0x24c)
+#define FE_INT_GRP		(RALINK_FRAME_ENGINE_BASE + 0x20)
+/* Old FE with New PDMA */
+#define PDMA_RELATED            0x0800
+/* 1. PDMA */
+#define TX_BASE_PTR0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x000)
+#define TX_MAX_CNT0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x004)
+#define TX_CTX_IDX0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x008)
+#define TX_DTX_IDX0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x00C)
+
+#define TX_BASE_PTR1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x010)
+#define TX_MAX_CNT1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x014)
+#define TX_CTX_IDX1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x018)
+#define TX_DTX_IDX1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x01C)
+
+#define TX_BASE_PTR2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x020)
+#define TX_MAX_CNT2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x024)
+#define TX_CTX_IDX2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x028)
+#define TX_DTX_IDX2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x02C)
+
+#define TX_BASE_PTR3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x030)
+#define TX_MAX_CNT3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x034)
+#define TX_CTX_IDX3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x038)
+#define TX_DTX_IDX3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x03C)
+
+#define RX_BASE_PTR0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x100)
+#define RX_MAX_CNT0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x104)
+#define RX_CALC_IDX0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x108)
+#define RX_DRX_IDX0	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x10C)
+
+#define RX_BASE_PTR1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x110)
+#define RX_MAX_CNT1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x114)
+#define RX_CALC_IDX1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x118)
+#define RX_DRX_IDX1	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x11C)
+
+#define RX_BASE_PTR2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x120)
+#define RX_MAX_CNT2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x124)
+#define RX_CALC_IDX2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x128)
+#define RX_DRX_IDX2	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x12C)
+
+#define RX_BASE_PTR3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x130)
+#define RX_MAX_CNT3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x134)
+#define RX_CALC_IDX3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x138)
+#define RX_DRX_IDX3	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x13C)
+
+#define PDMA_INFO	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x200)
+#define PDMA_GLO_CFG	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x204)
+#define PDMA_RST_IDX	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x208)
+#define PDMA_RST_CFG	(PDMA_RST_IDX)
+#define DLY_INT_CFG	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x20C)
+#define FREEQ_THRES	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x210)
+#define INT_STATUS	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x220)
+#define FE_INT_STATUS	(INT_STATUS)
+#define INT_MASK	(RALINK_FRAME_ENGINE_BASE + PDMA_RELATED + 0x228)
+#define FE_INT_ENABLE	(INT_MASK)
+#define SCH_Q01_CFG	(RALINK_FRAME_ENGINE_BASE + RAPDMA_OFFSET + 0x280)
+#define SCH_Q23_CFG	(RALINK_FRAME_ENGINE_BASE + RAPDMA_OFFSET + 0x284)
+
+#define PDMA_INT_GRP1	(RALINK_FRAME_ENGINE_BASE + RAPDMA_OFFSET + 0x250)
+#define PDMA_INT_GRP2	(RALINK_FRAME_ENGINE_BASE + RAPDMA_OFFSET + 0x254)
+#define PDMA_INT_GRP3	(RALINK_FRAME_ENGINE_BASE + RAPDMA_OFFSET + 0x22c)
+
+#define FE_GLO_CFG		(RALINK_FRAME_ENGINE_BASE + 0x00)
+#define FE_RST_GL		(RALINK_FRAME_ENGINE_BASE + 0x04)
+#define FE_INT_STATUS2		(RALINK_FRAME_ENGINE_BASE + 0x08)
+#define FE_INT_ENABLE2		(RALINK_FRAME_ENGINE_BASE + 0x0c)
+#define MAC1_LINK	BIT(24)
+#define MAC2_LINK	BIT(25)
+/* #define FC_DROP_STA         RALINK_FRAME_ENGINE_BASE + 0x18 */
+#define FOE_TS_T		(RALINK_FRAME_ENGINE_BASE + 0x10)
+
+#define FE_GLO_MISC		(RALINK_FRAME_ENGINE_BASE + 0x124)
+
+#define GDMA1_RELATED       0x0500
+#define GDMA1_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
+#define GDMA1_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
+#define GDMA1_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
+#define GDMA1_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
+
+#define GDMA2_RELATED       0x1500
+#define GDMA2_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
+#define GDMA2_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
+#define GDMA2_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
+#define GDMA2_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
+
+#define GDMA1_SCH_CFG       GDMA1_SHPR_CFG
+#define GDMA2_SCH_CFG       GDMA2_SHPR_CFG
+
+#define PSE_RELATED         0x0040
+#define PSE_FQ_CFG          (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
+#define CDMA_FC_CFG         (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
+#define GDMA1_FC_CFG        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
+#define GDMA2_FC_CFG        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x0C)
+#define CDMA_OQ_STA         (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x10)
+#define GDMA1_OQ_STA        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x14)
+#define GDMA2_OQ_STA        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x18)
+#define PSE_IQ_STA          (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x1C)
+
+#define CDMA_RELATED        0x0400
+#define CDMA_CSG_CFG        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
+#define CDMP_IG_CTRL        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
+#define CDMP_EG_CTRL        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
+
+#define PDMA_FC_CFG	    (RALINK_FRAME_ENGINE_BASE + 0x100)
+
+/*kurtis: add QDMA define*/
+#define CLK_CFG_0		(RALINK_SYSCTL_BASE + 0x2C)
+#define PAD_RGMII2_MDIO_CFG     (RALINK_SYSCTL_BASE + 0x58)
+
+#define QDMA_RELATED            0x1800
+#define  QTX_CFG_0          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x000)
+#define  QTX_SCH_0          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x004)
+#define  QTX_HEAD_0         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x008)
+#define  QTX_TAIL_0         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x00C)
+#define  QTX_CFG_1          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x010)
+#define  QTX_SCH_1          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x014)
+#define  QTX_HEAD_1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x018)
+#define  QTX_TAIL_1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x01C)
+#define  QTX_CFG_2          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x020)
+#define  QTX_SCH_2          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x024)
+#define  QTX_HEAD_2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x028)
+#define  QTX_TAIL_2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x02C)
+#define  QTX_CFG_3          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x030)
+#define  QTX_SCH_3          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x034)
+#define  QTX_HEAD_3         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x038)
+#define  QTX_TAIL_3         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x03C)
+#define  QTX_CFG_4          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x040)
+#define  QTX_SCH_4          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x044)
+#define  QTX_HEAD_4         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x048)
+#define  QTX_TAIL_4         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x04C)
+#define  QTX_CFG_5          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x050)
+#define  QTX_SCH_5          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x054)
+#define  QTX_HEAD_5         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x058)
+#define  QTX_TAIL_5         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x05C)
+#define  QTX_CFG_6          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x060)
+#define  QTX_SCH_6          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x064)
+#define  QTX_HEAD_6         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x068)
+#define  QTX_TAIL_6         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x06C)
+#define  QTX_CFG_7          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x070)
+#define  QTX_SCH_7          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x074)
+#define  QTX_HEAD_7         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x078)
+#define  QTX_TAIL_7         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x07C)
+#define  QTX_CFG_8          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x080)
+#define  QTX_SCH_8          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x084)
+#define  QTX_HEAD_8         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x088)
+#define  QTX_TAIL_8         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x08C)
+#define  QTX_CFG_9          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x090)
+#define  QTX_SCH_9          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x094)
+#define  QTX_HEAD_9         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x098)
+#define  QTX_TAIL_9         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x09C)
+#define  QTX_CFG_10         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A0)
+#define  QTX_SCH_10         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A4)
+#define  QTX_HEAD_10        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A8)
+#define  QTX_TAIL_10        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0AC)
+#define  QTX_CFG_11         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B0)
+#define  QTX_SCH_11         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B4)
+#define  QTX_HEAD_11        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B8)
+#define  QTX_TAIL_11        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0BC)
+#define  QTX_CFG_12         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C0)
+#define  QTX_SCH_12         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C4)
+#define  QTX_HEAD_12        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C8)
+#define  QTX_TAIL_12        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0CC)
+#define  QTX_CFG_13         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D0)
+#define  QTX_SCH_13         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D4)
+#define  QTX_HEAD_13        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D8)
+#define  QTX_TAIL_13        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0DC)
+#define  QTX_CFG_14         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E0)
+#define  QTX_SCH_14         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E4)
+#define  QTX_HEAD_14        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E8)
+#define  QTX_TAIL_14        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0EC)
+#define  QTX_CFG_15         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F0)
+#define  QTX_SCH_15         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F4)
+#define  QTX_HEAD_15        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F8)
+#define  QTX_TAIL_15        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0FC)
+#define  QRX_BASE_PTR_0     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x100)
+#define  QRX_MAX_CNT_0      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x104)
+#define  QRX_CRX_IDX_0      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x108)
+#define  QRX_DRX_IDX_0      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x10C)
+#define  QRX_BASE_PTR_1     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x110)
+#define  QRX_MAX_CNT_1      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x114)
+#define  QRX_CRX_IDX_1      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x118)
+#define  QRX_DRX_IDX_1      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x11C)
+
+#define  VQTX_TB_BASE_0     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x180)
+#define  VQTX_TB_BASE_1     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x184)
+#define  VQTX_TB_BASE_2     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x188)
+#define  VQTX_TB_BASE_3     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x18C)
+
+#define  QDMA_INFO          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x200)
+#define  QDMA_GLO_CFG       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x204)
+#define  QDMA_RST_IDX       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x208)
+#define  QDMA_RST_CFG       (QDMA_RST_IDX)
+#define  QDMA_DELAY_INT     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x20C)
+#define  QDMA_FC_THRES      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x210)
+#define  QDMA_TX_SCH        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x214)
+#define  QDMA_INT_STS       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x218)
+#define  QFE_INT_STATUS		  (QDMA_INT_STS)
+#define  QDMA_INT_MASK      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x21C)
+#define  QFE_INT_ENABLE		  (QDMA_INT_MASK)
+#define  QDMA_TRTCM         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x220)
+#define  QDMA_DATA0         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x224)
+#define  QDMA_DATA1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x228)
+#define  QDMA_RED_THRES     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x22C)
+#define  QDMA_TEST          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x230)
+#define  QDMA_DMA           (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x234)
+#define  QDMA_BMU           (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x238)
+#define  QDMA_HRED1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x240)
+#define  QDMA_HRED2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x244)
+#define  QDMA_SRED1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x248)
+#define  QDMA_SRED2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x24C)
+#define  QTX_MIB_IF         (RALINK_FRAME_ENGINE_BASE + 0x1abc)
+#define  QTX_CTX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x300)
+#define  QTX_DTX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x304)
+#define  QTX_FWD_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x308)
+#define  QTX_CRX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x310)
+#define  QTX_DRX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x314)
+#define  QTX_RLS_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x318)
+#define  QDMA_FQ_HEAD       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x320)
+#define  QDMA_FQ_TAIL       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x324)
+#define  QDMA_FQ_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x328)
+#define  QDMA_FQ_BLEN       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x32C)
+#define  QTX_Q0MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x350)
+#define  QTX_Q1MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x354)
+#define  QTX_Q2MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x358)
+#define  QTX_Q3MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x35C)
+#define  QTX_Q0MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x360)
+#define  QTX_Q1MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x364)
+#define  QTX_Q2MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x368)
+#define  QTX_Q3MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x36C)
+#define  QDMA_INT_GRP1	    (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x220)
+#define  QDMA_INT_GRP2	    (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x224)
+
+#define DELAY_INT_INIT		0x8f0f8f0f
+#define FE_INT_DLY_INIT		(TX_DLY_INT | RX_DLY_INT)
+#define RSS_INT_DLY_INT_2RING	(RING0_RX_DLY_INT | RING1_RX_DLY_INT)
+#define RSS_INT_DLY_INT		(RING0_RX_DLY_INT | RING1_RX_DLY_INT | \
+				 RING2_RX_DLY_INT | RING3_RX_DLY_INT | TX_DLY_INT)
+/* 6. Counter and Meter Table */
+/* PPE Accounting Group 0 Byte Cnt */
+#define PPE_AC_BCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x000)
+/* PPE Accounting Group 0 Packet Cnt */
+#define PPE_AC_PCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x004)
+/* 0 ~ 63 */
+#define PPE_MTR_CNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x200)
+/* skip... */
+#define PPE_MTR_CNT63	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x2FC)
+
+/* Transmit good byte cnt for GEport */
+#define GDMA_TX_GBCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x300)
+/* Transmit good pkt cnt for GEport */
+#define GDMA_TX_GPCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x304)
+/* Transmit skip cnt for GEport */
+#define GDMA_TX_SKIPCNT0 (RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x308)
+/* Transmit collision cnt for GEport */
+#define GDMA_TX_COLCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x30C)
+
+/* updated these address mappings to match data sheet v0.26,
+ * by bobtseng, 2007.6.14
+ */
+#define GDMA_RX_GBCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x320)
+#define GDMA_RX_GPCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x324)
+#define GDMA_RX_OERCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x328)
+#define GDMA_RX_FERCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x32C)
+#define GDMA_RX_SERCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x330)
+#define GDMA_RX_LERCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x334)
+#define GDMA_RX_CERCNT0	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x338)
+#define GDMA_RX_FCCNT1	(RALINK_FRAME_ENGINE_BASE + RACMTABLE_OFFSET + 0x33C)
+
+/* LRO global control */
+/* Bits [15:0]:LRO_ALT_RFSH_TIMER, Bits [20:16]:LRO_ALT_TICK_TIMER */
+#define LRO_ALT_REFRESH_TIMER   (RALINK_FRAME_ENGINE_BASE + 0x001C)
+
+/* LRO auto-learn table info */
+#define PDMA_FE_ALT_CF8		(RALINK_FRAME_ENGINE_BASE + 0x0300)
+#define PDMA_FE_ALT_SGL_CFC	(RALINK_FRAME_ENGINE_BASE + 0x0304)
+#define PDMA_FE_ALT_SEQ_CFC	(RALINK_FRAME_ENGINE_BASE + 0x0308)
+
+/* LRO controls */
+#define ADMA_LRO_CTRL_OFFSET    0x0980
+/*Bit [0]:LRO_EN, Bit [1]:LRO_IPv6_EN, Bit [2]:MULTIPLE_NON_LRO_RX_RING_EN,
+ * Bit [3]:MULTIPLE_RXD_PREFETCH_EN, Bit [4]:RXD_PREFETCH_EN,
+ * Bit [5]:LRO_DLY_INT_EN, Bit [6]:LRO_CRSN_BNW, Bit [7]:L3_CKS_UPD_EN,
+ * Bit [20]:first_ineligible_pkt_redirect_en, Bit [21]:cr_lro_alt_score_mode,
+ * Bit [22]:cr_lro_alt_rplc_mode, Bit [23]:cr_lro_l4_ctrl_psh_en,
+ * Bits [28:26]:LRO_RING_RELINGUISH_REQ, Bits [31:29]:LRO_RING_RELINGUISH_DONE
+ */
+#define ADMA_LRO_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x00)
+/* Bits [31:0]:LRO_CPU_REASON */
+#define ADMA_LRO_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x04)
+/* Bits [31:0]:AUTO_LEARN_LRO_ELIGIBLE_THRESHOLD */
+#define ADMA_LRO_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x08)
+/*Bits [7:0]:LRO_MAX_AGGREGATED_CNT,
+ * Bits [11:8]:LRO_VLAN_EN, Bits [13:12]:LRO_VLAN_VID_CMP_DEPTH,
+ * Bit [14]:ADMA_FW_RSTN_REQ, Bit [15]:ADMA_MODE, Bits [31:16]:LRO_MIN_RXD_SDL0
+ */
+#define ADMA_LRO_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			   ADMA_LRO_CTRL_OFFSET + 0x0C)
+
+/* LRO RX delay interrupt configurations */
+#define LRO_RX1_DLY_INT        (RALINK_FRAME_ENGINE_BASE + 0x0a70)
+#define LRO_RX2_DLY_INT        (RALINK_FRAME_ENGINE_BASE + 0x0a74)
+#define LRO_RX3_DLY_INT        (RALINK_FRAME_ENGINE_BASE + 0x0a78)
+
+/* LRO auto-learn configurations */
+#define PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET    0x0990
+#define PDMA_LRO_ATL_OVERFLOW_ADJ (RALINK_FRAME_ENGINE_BASE + \
+				   PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET)
+#define LRO_ALT_SCORE_DELTA   (RALINK_FRAME_ENGINE_BASE + 0x0a4c)
+
+/* LRO agg timer configurations */
+#define LRO_MAX_AGG_TIME       (RALINK_FRAME_ENGINE_BASE + 0x0a5c)
+
+/* LRO configurations of RX ring #0 */
+#define LRO_RXRING0_OFFSET          0x0b00
+#define LRO_RX_RING0_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x04)
+#define LRO_RX_RING0_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x08)
+#define LRO_RX_RING0_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x0C)
+#define LRO_RX_RING0_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING0_OFFSET + 0x10)
+#define LRO_RX_RING0_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING0_OFFSET + 0x28)
+/* Bit [8]:RING0_VLD, Bit [9]:RING0_MYIP_VLD */
+#define LRO_RX_RING0_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING0_OFFSET + 0x2C)
+#define LRO_RX_RING0_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING0_OFFSET + 0x30)
+/* LRO configurations of RX ring #1 */
+#define LRO_RXRING1_OFFSET          0x0b40
+#define LRO_RX_RING1_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+				 LRO_RXRING1_OFFSET + 0x00)
+#define LRO_RX_RING1_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x04)
+#define LRO_RX_RING1_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x08)
+#define LRO_RX_RING1_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x0C)
+#define LRO_RX_RING1_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x10)
+#define LRO_RX_RING1_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x14)
+#define LRO_RX_RING1_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x18)
+#define LRO_RX_RING1_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x1C)
+#define LRO_RX_RING1_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING1_OFFSET + 0x20)
+#define LRO_RX_RING1_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x24)
+#define LRO_RX_RING1_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x28)
+#define LRO_RX_RING1_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x2C)
+#define LRO_RX_RING1_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING1_OFFSET + 0x30)
+#define LRO_RXRING2_OFFSET          0x0b80
+#define LRO_RX_RING2_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+				 LRO_RXRING2_OFFSET + 0x00)
+#define LRO_RX_RING2_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x04)
+#define LRO_RX_RING2_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x08)
+#define LRO_RX_RING2_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x0C)
+#define LRO_RX_RING2_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x10)
+#define LRO_RX_RING2_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x14)
+#define LRO_RX_RING2_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x18)
+#define LRO_RX_RING2_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x1C)
+#define LRO_RX_RING2_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING2_OFFSET + 0x20)
+#define LRO_RX_RING2_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x24)
+#define LRO_RX_RING2_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x28)
+#define LRO_RX_RING2_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x2C)
+#define LRO_RX_RING2_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING2_OFFSET + 0x30)
+#define LRO_RXRING3_OFFSET          0x0bc0
+#define LRO_RX_RING3_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE + \
+				 LRO_RXRING3_OFFSET + 0x00)
+#define LRO_RX_RING3_DIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x04)
+#define LRO_RX_RING3_DIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x08)
+#define LRO_RX_RING3_DIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x0C)
+#define LRO_RX_RING3_DIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x10)
+#define LRO_RX_RING3_SIP_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x14)
+#define LRO_RX_RING3_SIP_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x18)
+#define LRO_RX_RING3_SIP_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x1C)
+#define LRO_RX_RING3_SIP_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			      LRO_RXRING3_OFFSET + 0x20)
+#define LRO_RX_RING3_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x24)
+#define LRO_RX_RING3_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x28)
+#define LRO_RX_RING3_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x2C)
+#define LRO_RX_RING3_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE + \
+			       LRO_RXRING3_OFFSET + 0x30)
+
+#define ADMA_DBG_OFFSET	0x0a30
+#define ADMA_TX_DBG0	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x00)
+#define ADMA_TX_DBG1	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x04)
+#define ADMA_RX_DBG0	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x08)
+#define ADMA_RX_DBG1	(RALINK_FRAME_ENGINE_BASE + ADMA_DBG_OFFSET + 0x0C)
+
+/********RSS CR ************/
+#define ADMA_RSS_OFFSET	0x3000
+#define ADMA_RSS_GLO_CFG	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x00)
+#define ADMA_RSS_INDR_TABLE_DW0	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x50)
+#define ADMA_RSS_INDR_TABLE_DW1	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x54)
+#define ADMA_RSS_INDR_TABLE_DW2	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x58)
+#define ADMA_RSS_INDR_TABLE_DW3	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x5C)
+#define ADMA_RSS_INDR_TABLE_DW4	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x60)
+#define ADMA_RSS_INDR_TABLE_DW5	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x64)
+#define ADMA_RSS_INDR_TABLE_DW6	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x68)
+#define ADMA_RSS_INDR_TABLE_DW7	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x6C)
+
+#define ADMA_RSS_HASH_KEY_DW0	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x20)
+#define ADMA_RSS_HASH_KEY_DW1	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x24)
+#define ADMA_RSS_HASH_KEY_DW2	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x28)
+#define ADMA_RSS_HASH_KEY_DW3	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x2C)
+#define ADMA_RSS_HASH_KEY_DW4	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x30)
+#define ADMA_RSS_HASH_KEY_DW5	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x34)
+#define ADMA_RSS_HASH_KEY_DW6	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x38)
+#define ADMA_RSS_HASH_KEY_DW7	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x3C)
+#define ADMA_RSS_HASH_KEY_DW8	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x40)
+#define ADMA_RSS_HASH_KEY_DW9	(RALINK_FRAME_ENGINE_BASE + ADMA_RSS_OFFSET + 0x44)
+/* LRO RX ring mode */
+#define PDMA_RX_NORMAL_MODE         (0x0)
+#define PDMA_RX_PSE_MODE            (0x1)
+#define PDMA_RX_FORCE_PORT          (0x2)
+#define PDMA_RX_AUTO_LEARN          (0x3)
+
+#define ADMA_RX_RING0   (0)
+#define ADMA_RX_RING1   (1)
+#define ADMA_RX_RING2   (2)
+#define ADMA_RX_RING3   (3)
+
+#define ADMA_RX_LEN0_MASK   (0x3fff)
+#define ADMA_RX_LEN1_MASK   (0x3)
+
+#define SET_ADMA_RX_LEN0(x)    ((x) & ADMA_RX_LEN0_MASK)
+#define SET_ADMA_RX_LEN1(x)    ((x) & ADMA_RX_LEN1_MASK)
+
+#define QDMA_PAGE	(ETHDMASYS_FRAME_ENGINE_BASE + 0x19f0)
+
+/* SFQ registers */
+#define VQTX_TB_BASE0 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1980)
+#define VQTX_TB_BASE1 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1984)
+#define VQTX_TB_BASE2 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1988)
+#define VQTX_TB_BASE3 (ETHDMASYS_FRAME_ENGINE_BASE + 0x198C)
+#define SFQ_OFFSET 0x1A80
+#define VQTX_GLO (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET)
+#define VQTX_INVLD_PTR (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x0C)
+#define VQTX_NUM (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x10)
+#define VQTX_SCH (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x18)
+#define VQTX_HASH_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x20)
+#define VQTX_HASH_SD (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x24)
+#define VQTX_VLD_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x30)
+#define VQTX_MIB_IF (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x3C)
+#define VQTX_MIB_PCNT (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x40)
+#define VQTX_MIB_BCNT0 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x44)
+#define VQTX_MIB_BCNT1 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x48)
+#define VQTX_0_BIND_QID	(PQ0 << 0)
+#define VQTX_1_BIND_QID (PQ1 << 8)
+#define VQTX_2_BIND_QID (PQ2 << 16)
+#define VQTX_3_BIND_QID (PQ3 << 24)
+#define VQTX_4_BIND_QID (PQ4 << 0)
+#define VQTX_5_BIND_QID (PQ5 << 8)
+#define VQTX_6_BIND_QID (PQ6 << 16)
+#define VQTX_7_BIND_QID (PQ7 << 24)
+#define VQTX_TB_BASE4 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1990)
+#define VQTX_TB_BASE5 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1994)
+#define VQTX_TB_BASE6 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1998)
+#define VQTX_TB_BASE7 (ETHDMASYS_FRAME_ENGINE_BASE + 0x199C)
+#define VQTX_0_3_BIND_QID (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x140)
+#define VQTX_4_7_BIND_QID (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x144)
+#define PQ0	0
+#define PQ1	1
+#define PQ2	15
+#define PQ3	16
+#define PQ4	30
+#define PQ5	31
+#define PQ6	43
+#define PQ7	63
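+
+/* Illustrative sketch (not driver code): the per-VQ physical-queue
+ * bindings above are packed four to a register, so a hypothetical
+ * init step for VQ0~VQ3 would be:
+ *
+ *	sys_reg_write(VQTX_0_3_BIND_QID,
+ *		      VQTX_0_BIND_QID | VQTX_1_BIND_QID |
+ *		      VQTX_2_BIND_QID | VQTX_3_BIND_QID);
+ */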
+
+#if defined(CONFIG_MACH_MT7623)
+#define VQ_NUM0	256
+#define VQ_NUM1	256
+#define VQ_NUM2	256
+#define VQ_NUM3	256
+#define VQ_NUM4	0
+#define VQ_NUM5	0
+#define VQ_NUM6	0
+#define VQ_NUM7	0
+#define VQTX_NUM_0  (4 << 0)
+#define VQTX_NUM_1  (4 << 4)
+#define VQTX_NUM_2  (4 << 8)
+#define VQTX_NUM_3  (4 << 12)
+#define VQTX_NUM_4   0
+#define VQTX_NUM_5   0
+#define VQTX_NUM_6   0
+#define VQTX_NUM_7   0
+#else
+#define VQ_NUM0	128
+#define VQ_NUM1	128
+#define VQ_NUM2	128
+#define VQ_NUM3	128
+#define VQ_NUM4	128
+#define VQ_NUM5	128
+#define VQ_NUM6	128
+#define VQ_NUM7	128
+#define VQTX_NUM_0  (3 << 0)
+#define VQTX_NUM_1  (3 << 4)
+#define VQTX_NUM_2  (3 << 8)
+#define VQTX_NUM_3  (3 << 12)
+#define VQTX_NUM_4  (3 << 16)
+#define VQTX_NUM_5  (3 << 20)
+#define VQTX_NUM_6  (3 << 24)
+#define VQTX_NUM_7  (3 << 28)
+#endif
+
+#define VQTX_MIB_EN BIT(17)
+
+/* HW IO-COHERENT base address */
+#if defined(CONFIG_MACH_LEOPARD)
+#define HW_IOC_BASE	0x1B000080
+#define IOC_OFFSET	4
+#else
+#define HW_IOC_BASE	0x1B000400
+#define IOC_OFFSET	8
+#endif
+
+/*=========================================
+ *    SFQ Table Format define
+ *=========================================
+ */
+struct SFQ_INFO1_T {
+	unsigned int VQHPTR;
+};
+
+struct SFQ_INFO2_T {
+	unsigned int VQTPTR;
+};
+
+struct SFQ_INFO3_T {
+	unsigned int QUE_DEPTH:16;
+	unsigned int DEFICIT_CNT:16;
+};
+
+struct SFQ_INFO4_T {
+	unsigned int RESV;
+};
+
+struct SFQ_INFO5_T {
+	unsigned int PKT_CNT;
+};
+
+struct SFQ_INFO6_T {
+	unsigned int BYTE_CNT;
+};
+
+struct SFQ_INFO7_T {
+	unsigned int BYTE_CNT;
+};
+
+struct SFQ_INFO8_T {
+	unsigned int RESV;
+};
+
+struct SFQ_table {
+	struct SFQ_INFO1_T sfq_info1;
+	struct SFQ_INFO2_T sfq_info2;
+	struct SFQ_INFO3_T sfq_info3;
+	struct SFQ_INFO4_T sfq_info4;
+	struct SFQ_INFO5_T sfq_info5;
+	struct SFQ_INFO6_T sfq_info6;
+	struct SFQ_INFO7_T sfq_info7;
+	struct SFQ_INFO8_T sfq_info8;
+};
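+
+/* Illustrative sketch (not driver code): each SFQ table entry is eight
+ * 32-bit words (32 bytes).  A hypothetical setup would allocate VQ_NUM0
+ * entries coherently and point VQTX_TB_BASE0 at them, using the
+ * sys_reg_write() accessor defined later in this header:
+ *
+ *	struct SFQ_table *sfq0;
+ *	dma_addr_t phy_sfq0;
+ *
+ *	sfq0 = dma_alloc_coherent(dev, VQ_NUM0 * sizeof(struct SFQ_table),
+ *				  &phy_sfq0, GFP_KERNEL);
+ *	if (sfq0)
+ *		sys_reg_write(VQTX_TB_BASE0, phy_sfq0);
+ */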
+
+#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
+#define FE_GDM_RXID1_OFFSET	(0x0130)
+#define FE_GDM_RXID1		(RALINK_FRAME_ENGINE_BASE + FE_GDM_RXID1_OFFSET)
+#define GDM_VLAN_PRI7_RXID_SEL	BITS(30, 31)
+#define GDM_VLAN_PRI6_RXID_SEL	BITS(28, 29)
+#define GDM_VLAN_PRI5_RXID_SEL	BITS(26, 27)
+#define GDM_VLAN_PRI4_RXID_SEL	BITS(24, 25)
+#define GDM_VLAN_PRI3_RXID_SEL	BITS(22, 23)
+#define GDM_VLAN_PRI2_RXID_SEL	BITS(20, 21)
+#define GDM_VLAN_PRI1_RXID_SEL	BITS(18, 19)
+#define GDM_VLAN_PRI0_RXID_SEL	BITS(16, 17)
+#define GDM_TCP_ACK_RXID_SEL	BITS(4, 5)
+#define GDM_TCP_ACK_WZPC	BIT(3)
+#define GDM_RXID_PRI_SEL	BITS(0, 2)
+
+#define FE_GDM_RXID2_OFFSET	(0x0134)
+#define FE_GDM_RXID2		(RALINK_FRAME_ENGINE_BASE + FE_GDM_RXID2_OFFSET)
+#define GDM_STAG7_RXID_SEL	BITS(30, 31)
+#define GDM_STAG6_RXID_SEL	BITS(28, 29)
+#define GDM_STAG5_RXID_SEL	BITS(26, 27)
+#define GDM_STAG4_RXID_SEL	BITS(24, 25)
+#define GDM_STAG3_RXID_SEL	BITS(22, 23)
+#define GDM_STAG2_RXID_SEL	BITS(20, 21)
+#define GDM_STAG1_RXID_SEL	BITS(18, 19)
+#define GDM_STAG0_RXID_SEL	BITS(16, 17)
+#define GDM_PID2_RXID_SEL	BITS(2, 3)
+#define GDM_PID1_RXID_SEL	BITS(0, 1)
+
+#define GDM_PRI_PID              (0)
+#define GDM_PRI_VLAN_PID         (1)
+#define GDM_PRI_ACK_PID          (2)
+#define GDM_PRI_VLAN_ACK_PID     (3)
+#define GDM_PRI_ACK_VLAN_PID     (4)
+
+#define SET_GDM_VLAN_PRI_RXID_SEL(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(0x03 << (((x) << 1) + 16));   \
+reg_val |= ((y) & 0x3) << (((x) << 1) + 16);  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_TCP_ACK_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_TCP_ACK_RXID_SEL);   \
+reg_val |= ((x) & 0x3) << 4;  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_TCP_ACK_WZPC(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_TCP_ACK_WZPC);   \
+reg_val |= ((x) & 0x1) << 3;  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define SET_GDM_RXID_PRI_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID1); \
+reg_val &= ~(GDM_RXID_PRI_SEL);   \
+reg_val |= (x) & 0x7;  \
+sys_reg_write(FE_GDM_RXID1, reg_val); \
+}
+
+#define GDM_STAG_RXID_SEL(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(0x03 << (((x) << 1) + 16));   \
+reg_val |= ((y) & 0x3) << (((x) << 1) + 16);  \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#define SET_GDM_PID2_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(GDM_PID2_RXID_SEL);   \
+reg_val |= ((x) & 0x3) << 2;  \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
+
+#define SET_GDM_PID1_RXID_SEL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(FE_GDM_RXID2); \
+reg_val &= ~(GDM_PID1_RXID_SEL);   \
+reg_val |= ((x) & 0x3);  \
+sys_reg_write(FE_GDM_RXID2, reg_val); \
+}
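+
+/* Illustrative sketch (not driver code): steering TCP ACKs to a
+ * hypothetical RX ring (here ring 3) via the helpers above:
+ *
+ *	SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_VLAN_PID);
+ *	SET_GDM_TCP_ACK_RXID_SEL(3);
+ *	SET_GDM_TCP_ACK_WZPC(1);
+ */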
+
+#endif /* CONFIG_RAETH_HW_LRO || CONFIG_RAETH_MULTIPLE_RX_RING */
+/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */
+#define	PORT0_PKCOUNT		(0xb01100e8)
+#define	PORT1_PKCOUNT		(0xb01100ec)
+#define	PORT2_PKCOUNT		(0xb01100f0)
+#define	PORT3_PKCOUNT		(0xb01100f4)
+#define	PORT4_PKCOUNT		(0xb01100f8)
+#define	PORT5_PKCOUNT		(0xb01100fc)
+
+#define sys_reg_read(phys)	 (__raw_readl((void __iomem *)phys))
+#define sys_reg_write(phys, val) (__raw_writel(val, (void __iomem *)phys))
+
+/* ====================================== */
+#define GDM1_DISPAD       BIT(18)
+#define GDM1_DISCRC       BIT(17)
+
+/* GDMA1 uni-cast frames destination port */
+#define GDM1_ICS_EN	   (0x1 << 22)
+#define GDM1_TCS_EN	   (0x1 << 21)
+#define GDM1_UCS_EN	   (0x1 << 20)
+#define GDM1_JMB_EN	   (0x1 << 19)
+#define GDM1_STRPCRC	   (0x1 << 16)
+#define GDM1_UFRC_P_CPU     (0 << 12)
+
+/* GDMA1 broad-cast MAC address frames */
+#define GDM1_BFRC_P_CPU     (0 << 8)
+
+/* GDMA1 multi-cast MAC address frames */
+#define GDM1_MFRC_P_CPU     (0 << 4)
+
+/* GDMA1 other MAC address frames destination port */
+#define GDM1_OFRC_P_CPU     (0 << 0)
+
+/* checksum generator registers are removed */
+#define ICS_GEN_EN          (0 << 2)
+#define UCS_GEN_EN          (0 << 1)
+#define TCS_GEN_EN          (0 << 0)
+
+/* MDIO_CFG     bit */
+#define MDIO_CFG_GP1_FC_TX	BIT(11)
+#define MDIO_CFG_GP1_FC_RX	BIT(10)
+
+/* ====================================== */
+/* ====================================== */
+#define GP1_LNK_DWN     BIT(9)
+#define GP1_AN_FAIL     BIT(8)
+/* ====================================== */
+/* ====================================== */
+#define PSE_RESET       BIT(0)
+/* ====================================== */
+#define PST_DRX_IDX3       BIT(19)
+#define PST_DRX_IDX2       BIT(18)
+#define PST_DRX_IDX1       BIT(17)
+#define PST_DRX_IDX0       BIT(16)
+#define PST_DTX_IDX3       BIT(3)
+#define PST_DTX_IDX2       BIT(2)
+#define PST_DTX_IDX1       BIT(1)
+#define PST_DTX_IDX0       BIT(0)
+
+#define RX_2B_OFFSET	  BIT(31)
+#define CSR_CLKGATE_BYP	  BIT(30)
+#define MULTI_EN	  BIT(10)
+#define DESC_32B_EN	  BIT(8)
+#define TX_WB_DDONE       BIT(6)
+#define RX_DMA_BUSY       BIT(3)
+#define TX_DMA_BUSY       BIT(1)
+#define RX_DMA_EN         BIT(2)
+#define TX_DMA_EN         BIT(0)
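+
+/* Illustrative sketch (not driver code): the DMA enable bits above are
+ * applied with a read-modify-write through the accessors defined
+ * earlier, assuming PDMA_GLO_CFG is the PDMA global configuration
+ * register defined elsewhere in this header:
+ *
+ *	unsigned int reg_val = sys_reg_read(PDMA_GLO_CFG);
+ *
+ *	reg_val |= (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
+ *	sys_reg_write(PDMA_GLO_CFG, reg_val);
+ */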
+
+#define PDMA_BT_SIZE_4DWORDS		(0 << 4)
+#define PDMA_BT_SIZE_8DWORDS		BIT(4)
+#define PDMA_BT_SIZE_16DWORDS		(2 << 4)
+#define PDMA_BT_SIZE_32DWORDS		(3 << 4)
+
+#define ADMA_RX_BT_SIZE_4DWORDS		(0 << 11)
+#define ADMA_RX_BT_SIZE_8DWORDS		BIT(11)
+#define ADMA_RX_BT_SIZE_16DWORDS	(2 << 11)
+#define ADMA_RX_BT_SIZE_32DWORDS	(3 << 11)
+
+/* Register bits.
+ */
+
+#define MACCFG_RXEN	BIT(2)
+#define MACCFG_TXEN	BIT(3)
+#define MACCFG_PROMISC	BIT(18)
+#define MACCFG_RXMCAST	BIT(19)
+#define MACCFG_FDUPLEX	BIT(20)
+#define MACCFG_PORTSEL	BIT(27)
+#define MACCFG_HBEATDIS	BIT(28)
+
+#define DMACTL_SR	BIT(1)	/* Start/Stop Receive */
+#define DMACTL_ST	BIT(13)	/* Start/Stop Transmission Command */
+
+#define DMACFG_SWR	BIT(0)	/* Software Reset */
+#define DMACFG_BURST32		(32 << 8)
+
+#define DMASTAT_TS		0x00700000	/* Transmit Process State */
+#define DMASTAT_RS		0x000e0000	/* Receive Process State */
+
+#define MACCFG_INIT		0	/* optionally: MACCFG_FDUPLEX | MACCFG_PORTSEL */
+
+/* Descriptor bits.
+ */
+#define R_OWN		0x80000000	/* Own Bit */
+#define RD_RER		0x02000000	/* Receive End Of Ring */
+#define RD_LS		0x00000100	/* Last Descriptor */
+#define RD_ES		0x00008000	/* Error Summary */
+#define RD_CHAIN	0x01000000	/* Chained */
+
+/* Word 0 */
+#define T_OWN		0x80000000	/* Own Bit */
+#define TD_ES		0x00008000	/* Error Summary */
+
+/* Word 1 */
+#define TD_LS		0x40000000	/* Last Segment */
+#define TD_FS		0x20000000	/* First Segment */
+#define TD_TER		0x08000000	/* Transmit End Of Ring */
+#define TD_CHAIN	0x01000000	/* Chained */
+
+#define TD_SET		0x08000000	/* Setup Packet */
+
+#define POLL_DEMAND 1
+
+#define RSTCTL	(0x34)
+#define RSTCTL_RSTENET1	BIT(19)
+#define RSTCTL_RSTENET2	BIT(20)
+
+#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG		0xff908000
+#define INIT_VALUE_OF_PSE_FQFC_CFG		0x80504000
+#define INIT_VALUE_OF_FORCE_100_FD		0x1001BC01
+#define INIT_VALUE_OF_FORCE_1000_FD		0x1F01DC01
+
+/* Define Whole FE Reset Register */
+#define RSTCTRL			(RALINK_SYSCTL_BASE + 0x34)
+#define RT2880_AGPIOCFG_REG	(RALINK_SYSCTL_BASE + 0x3C)
+
+/*=========================================
+ *    PDMA RX Descriptor Format define
+ *=========================================
+ */
+
+struct PDMA_RXD_INFO1_T {
+	unsigned int PDP0;
+};
+
+struct PDMA_RXD_INFO2_T {
+	unsigned int PLEN1:2;
+	unsigned int LRO_AGG_CNT:8;
+	unsigned int REV:5;
+	unsigned int TAG:1;
+	unsigned int PLEN0:14;
+	unsigned int LS0:1;
+	unsigned int DDONE_bit:1;
+};
+
+struct PDMA_RXD_INFO3_T {
+	unsigned int VID:16;
+	unsigned int TPID:16;
+};
+
+struct PDMA_RXD_INFO4_T {
+	unsigned int FOE_ENTRY:14;
+	unsigned int CRSN:5;
+	unsigned int SP:3;
+	unsigned int FOE_ENTRY_32:1;
+	unsigned int L4F:1;
+	unsigned int L4VLD:1;
+	unsigned int TACK:1;
+	unsigned int IP4F:1;
+	unsigned int IP4:1;
+	unsigned int IP6:1;
+	unsigned int UN_USE1:3;
+};
+
+struct PDMA_rxdesc {
+	struct PDMA_RXD_INFO1_T rxd_info1;
+	struct PDMA_RXD_INFO2_T rxd_info2;
+	struct PDMA_RXD_INFO3_T rxd_info3;
+	struct PDMA_RXD_INFO4_T rxd_info4;
+#ifdef CONFIG_32B_DESC
+	unsigned int rxd_info5;
+	unsigned int rxd_info6;
+	unsigned int rxd_info7;
+	unsigned int rxd_info8;
+#endif
+};
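+
+/* Illustrative sketch (not driver code): RX polling checks ownership
+ * via DDONE before reading the segment, using hypothetical local names:
+ *
+ *	struct PDMA_rxdesc *rxd = &rx_ring[rx_dma_owner_idx];
+ *
+ *	if (!rxd->rxd_info2.DDONE_bit)
+ *		return;		(descriptor still owned by DMA)
+ *	length = rxd->rxd_info2.PLEN0;	(packet length of segment 0)
+ */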
+
+/*=========================================
+ *    PDMA TX Descriptor Format define
+ *=========================================
+ */
+struct PDMA_TXD_INFO1_T {
+	unsigned int SDP0;
+};
+
+struct PDMA_TXD_INFO2_T {
+	unsigned int SDL1:14;
+	unsigned int LS1_bit:1;
+	unsigned int BURST_bit:1;
+	unsigned int SDL0:14;
+	unsigned int LS0_bit:1;
+	unsigned int DDONE_bit:1;
+};
+
+struct PDMA_TXD_INFO3_T {
+	unsigned int SDP1;
+};
+
+struct PDMA_TXD_INFO4_T {
+	unsigned int VLAN_TAG:17;	/* INSV(1)+VPRI(3)+CFI(1)+VID(12) */
+	unsigned int RESV:2;
+	unsigned int UDF:6;
+	unsigned int FPORT:3;
+	unsigned int TSO:1;
+	unsigned int TUI_CO:3;
+};
+
+struct PDMA_txdesc {
+	struct PDMA_TXD_INFO1_T txd_info1;
+	struct PDMA_TXD_INFO2_T txd_info2;
+	struct PDMA_TXD_INFO3_T txd_info3;
+	struct PDMA_TXD_INFO4_T txd_info4;
+#ifdef CONFIG_32B_DESC
+	unsigned int txd_info5;
+	unsigned int txd_info6;
+	unsigned int txd_info7;
+	unsigned int txd_info8;
+#endif
+};
+
+/*=========================================
+ *    QDMA TX Descriptor Format define
+ *=========================================
+ */
+struct QDMA_TXD_INFO1_T {
+	unsigned int SDP;
+};
+
+struct QDMA_TXD_INFO2_T {
+	unsigned int NDP;
+};
+
+struct QDMA_TXD_INFO3_T {
+	unsigned int QID:4;	/* Q0~Q15 */
+	/* unsigned int    VQID                  : 10; */
+	unsigned int PROT:3;
+	unsigned int IPOFST:7;
+	unsigned int SWC_bit:1;
+	unsigned int BURST_bit:1;
+	unsigned int SDL:14;
+	unsigned int LS_bit:1;
+	unsigned int OWN_bit:1;
+};
+
+struct QDMA_TXD_INFO4_T {
+	unsigned int VLAN_TAG:17;	/* INSV(1)+VPRI(3)+CFI(1)+VID(12) */
+	unsigned int VQID0:1;
+	unsigned int SDL:2;
+	unsigned int QID:2;	/* Q16~Q63 */
+	unsigned int RESV:3;
+	unsigned int FPORT:3;
+	unsigned int TSO:1;
+	unsigned int TUI_CO:3;
+};
+
+struct QDMA_txdesc {
+	struct QDMA_TXD_INFO1_T txd_info1;
+	struct QDMA_TXD_INFO2_T txd_info2;
+	struct QDMA_TXD_INFO3_T txd_info3;
+	struct QDMA_TXD_INFO4_T txd_info4;
+#ifdef CONFIG_32B_DESC
+	unsigned int txd_info5;
+	unsigned int txd_info6;
+	unsigned int txd_info7;
+	unsigned int txd_info8;
+#endif
+};
+
+#define QTXD_LEN (sizeof(struct QDMA_txdesc))
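+
+/* Illustrative sketch (not driver code): the 6-bit QDMA queue id is
+ * split across the descriptor, QID[3:0] in txd_info3 and QID[5:4] in
+ * txd_info4, so for a hypothetical queue number qn (0~63):
+ *
+ *	txd->txd_info3.QID = qn & 0xf;
+ *	txd->txd_info4.QID = (qn >> 4) & 0x3;
+ */
+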
+#define PHY_ENABLE_AUTO_NEGO	0x1000
+#define PHY_RESTART_AUTO_NEGO	0x0200
+
+/* PHY_STAT_REG = 1; */
+#define PHY_AUTO_NEGO_COMP	0x0020
+#define PHY_LINK_STATUS		0x0004
+
+/* PHY_AUTO_NEGO_REG = 4; */
+#define PHY_CAP_10_HALF		0x0020
+#define PHY_CAP_10_FULL		0x0040
+#define	PHY_CAP_100_HALF	0x0080
+#define	PHY_CAP_100_FULL	0x0100
+
+/* proc definition */
+
+#define PROCREG_CONTROL_FILE      "/var/run/procreg_control"
+#if defined(CONFIG_MACH_MT7623)
+#define PROCREG_DIR             "mt7623"
+#elif defined(CONFIG_MACH_LEOPARD)
+#define PROCREG_DIR             "leopard"
+#elif defined(CONFIG_PINCTRL_MT7622)
+#define PROCREG_DIR             "mt7622"
+#elif defined(CONFIG_SOC_MT7621)
+#define PROCREG_DIR             "mt7621"
+#endif
+
+#define PROCREG_SKBFREE		"skb_free"
+#define PROCREG_TXRING		"tx_ring"
+#define PROCREG_RXRING		"rx_ring"
+#define PROCREG_RXRING1		"rx_ring1"
+#define PROCREG_RXRING2		"rx_ring2"
+#define PROCREG_RXRING3		"rx_ring3"
+#define PROCREG_NUM_OF_TXD	"num_of_txd"
+#define PROCREG_TSO_LEN		"tso_len"
+#define PROCREG_LRO_STATS	"lro_stats"
+#define PROCREG_HW_LRO_STATS	"hw_lro_stats"
+#define PROCREG_HW_LRO_AUTO_TLB	"hw_lro_auto_tlb"
+#define PROCREG_HW_IO_COHERENT	"hw_iocoherent"
+#define PROCREG_GMAC		"gmac"
+#define PROCREG_GMAC2           "gmac2"
+#define PROCREG_CP0		"cp0"
+#define PROCREG_RAQOS		"qos"
+#define PROCREG_READ_VAL	"regread_value"
+#define PROCREG_WRITE_VAL	"regwrite_value"
+#define PROCREG_ADDR		"reg_addr"
+#define PROCREG_CTL		"procreg_control"
+#define PROCREG_RXDONE_INTR	"rxdone_intr_count"
+#define PROCREG_ESW_INTR	"esw_intr_count"
+#define PROCREG_ESW_CNT		"esw_cnt"
+#define PROCREG_ETH_CNT		"eth_cnt"
+#define PROCREG_SNMP		"snmp"
+#define PROCREG_SET_LAN_IP	"set_lan_ip"
+#if defined(TASKLET_WORKQUEUE_SW)
+#define PROCREG_SCHE		"schedule"
+#endif
+#define PROCREG_QDMA            "qdma"
+#define PROCREG_INT_DBG		"int_dbg"
+struct rt2880_reg_op_data {
+	char name[64];
+	unsigned int reg_addr;
+	unsigned int op;
+	unsigned int reg_value;
+};
+
+struct lro_counters {
+	u32 lro_aggregated;
+	u32 lro_flushed;
+	u32 lro_no_desc;
+};
+
+struct lro_para_struct {
+	unsigned int lan_ip1;
+};
+
+struct parse_result {
+	/* layer2 header */
+	u8 dmac[6];
+	u8 smac[6];
+
+	/* vlan header */
+	u16 vlan_tag;
+	u16 vlan1_gap;
+	u16 vlan1;
+	u16 vlan2_gap;
+	u16 vlan2;
+	u16 vlan_layer;
+
+	/* pppoe header */
+	u32 pppoe_gap;
+	u16 ppp_tag;
+	u16 pppoe_sid;
+
+	/* layer3 header */
+	u16 eth_type;
+	struct iphdr iph;
+	struct ipv6hdr ip6h;
+
+	/* layer4 header */
+	struct tcphdr th;
+	struct udphdr uh;
+
+	u32 pkt_type;
+	u8 is_mcast;
+};
+
+#define DMA_GLO_CFG PDMA_GLO_CFG
+
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define GDMA1_FWD_PORT 0x5555
+#define GDMA2_FWD_PORT 0x5555
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
+#define GDMA1_FWD_PORT 0x5555
+#define GDMA2_FWD_PORT 0x5555
+#else
+#define GDMA1_FWD_PORT 0x0000
+#define GDMA2_FWD_PORT 0x0000
+#endif
+
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
+#else
+#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0
+#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1
+#endif
+#define RAETH_RX_CALC_IDX2 RX_CALC_IDX2
+#define RAETH_RX_CALC_IDX3 RX_CALC_IDX3
+#define RAETH_FE_INT_STATUS FE_INT_STATUS
+#define RAETH_FE_INT_ALL FE_INT_ALL
+#define RAETH_FE_INT_ENABLE FE_INT_ENABLE
+#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT
+#define RAETH_FE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | \
+			      TX_DONE_INT0 | TX_DONE_INT1 | \
+			      TX_DONE_INT2 | TX_DONE_INT3)
+#define QFE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | \
+			 TX_DONE_INT0 | TX_DONE_INT1 | \
+			 TX_DONE_INT2 | TX_DONE_INT3)
+#define RAETH_TX_DLY_INT TX_DLY_INT
+#define RAETH_TX_DONE_INT0 TX_DONE_INT0
+#define RAETH_DLY_INT_CFG DLY_INT_CFG
+
+/* io-coherent for ethdmasys */
+#define	IOC_ETH_PDMA	BIT(0)
+#define	IOC_ETH_QDMA	BIT(1)
+
+#endif	/* RAETH_REG_H */
diff --git a/src/kernel/modules/netsys_driver/nat/include/raether.h b/src/kernel/modules/netsys_driver/nat/include/raether.h
new file mode 100755
index 0000000..81e27b7
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/raether.h
@@ -0,0 +1,482 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA2882ETHEND_H
+#define RA2882ETHEND_H
+
+#include "raeth_config.h"
+#include "raeth_reg.h"
+#include "ra_dbg_proc.h"
+#include "ra_ioctl.h"
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/fs.h>
+#include <linux/mii.h>
+#include <linux/uaccess.h>
+#if defined(CONFIG_RAETH_TSO)
+#include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <linux/in.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_pppox.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/ppp_defs.h>
+
+/* LRO support */
+#include <linux/inet_lro.h>
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <asm-generic/pci-dma-compat.h>
+
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/dma-mapping.h>
+
+#include <linux/kthread.h>
+#include <linux/prefetch.h>
+
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_RA_HW_NAT)  || defined(CONFIG_RA_HW_NAT_MODULE)
+#include <net/ra_nat.h>
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define ETH_GPIO_BASE	0x10005000
+
+#if defined(CONFIG_QDMA_MQ)
+#define GMAC1_TXQ_NUM 3
+#define GMAC1_TXQ_TXD_NUM 512
+#define GMAC1_TXD_NUM (GMAC1_TXQ_NUM * GMAC1_TXQ_TXD_NUM)
+#define GMAC2_TXQ_NUM 1
+#define GMAC2_TXQ_TXD_NUM 128
+#define GMAC2_TXD_NUM (GMAC2_TXQ_NUM * GMAC2_TXQ_TXD_NUM)
+#define NUM_TX_DESC (GMAC1_TXD_NUM + GMAC2_TXD_NUM)
+#define TOTAL_TXQ_NUM (GMAC1_TXQ_NUM + GMAC2_TXQ_NUM)
+#else
+#define TOTAL_TXQ_NUM 2
+#endif
+
+#if defined(CONFIG_MACH_MT7623)
+#define NUM_RX_DESC     2048
+#define NUM_QRX_DESC 16
+#define NUM_PQ_RESV 4
+#define FFA 2048
+#define QUEUE_OFFSET 0x10
+#else
+#define NUM_QRX_DESC 16
+#define NUM_PQ_RESV 4
+#define FFA 512
+#define QUEUE_OFFSET 0x10
+#endif
+
+#if defined(CONFIG_PINCTRL_MT7622)
+#define NUM_PQ 64
+#else
+#define NUM_PQ 16
+#endif
+/* #define NUM_TX_MAX_PROCESS NUM_TX_DESC */
+#define NUM_RX_MAX_PROCESS 16
+
+#define MAX_RX_RING_NUM	4
+#define NUM_LRO_RX_DESC	16
+
+#define	MAX_RX_LENGTH	1536
+
+#if defined(CONFIG_SUPPORT_OPENWRT)
+#define DEV_NAME        "eth0"
+#define DEV2_NAME       "eth1"
+#else
+#define DEV_NAME        "eth2"
+#define DEV2_NAME       "eth3"
+#endif
+
+#if defined(CONFIG_MACH_MT7623)
+#define GMAC0_OFFSET    0xE000
+#define GMAC2_OFFSET    0xE006
+#else
+#define GMAC0_OFFSET    0x28
+#define GMAC2_OFFSET    0x22
+#endif
+
+#if defined(CONFIG_MACH_MT7623)
+#define IRQ_ENET0       232
+#define IRQ_ENET1       231
+#define IRQ_ENET2       230
+#else
+/* NOTE(Nelson): prom version started from 20150806 */
+#define IRQ_ENET0       255
+#define IRQ_ENET1       256
+#define IRQ_ENET2       257
+#endif
+#define MTK_NAPI_WEIGHT	64
+
+#define RAETH_VERSION	"STD_v0.1"
+
+/* MT7623 PSE reset workaround */
+#define	FE_RESET_POLLING_MS	(5000)
+
+/* LEOPARD PHY polling */
+#define PHY_POLLING_MS		(1000)
+#define FE_DEFAULT_LAN_IP	"192.168.1.1"
+#define IP4_ADDR_LEN		16
+
+#if defined(CONFIG_SOC_MT7621)
+#define MT_TRIGGER_LOW	0
+#else
+#define MT_TRIGGER_LOW	IRQF_TRIGGER_LOW
+#endif
+
+/* This enum defines the order of the clocks in the clock array, so each
+ * value can be used as an index into the clks[] array of struct END_DEVICE.
+ */
+enum mtk_clks_map {
+	MTK_CLK_ETHIF,
+	MTK_CLK_ESW,
+	MTK_CLK_GP0,
+	MTK_CLK_GP1,
+	MTK_CLK_GP2,
+	MTK_CLK_SGMII_TX250M,
+	MTK_CLK_SGMII_RX250M,
+	MTK_CLK_SGMII_CDR_REF,
+	MTK_CLK_SGMII_CDR_FB,
+	MTK_CLK_SGMII1_TX250M,
+	MTK_CLK_SGMII1_RX250M,
+	MTK_CLK_SGMII1_CDR_REF,
+	MTK_CLK_SGMII1_CDR_FB,
+	MTK_CLK_TRGPLL,
+	MTK_CLK_SGMIPLL,
+	MTK_CLK_ETH1PLL,
+	MTK_CLK_ETH2PLL,
+	MTK_CLK_FE,
+	MTK_CLK_SGMII_TOP,
+	MTK_CLK_MAX
+};
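+
+/* Illustrative sketch (not driver code): each enum value indexes the
+ * clks[] array in struct END_DEVICE below, e.g. for a hypothetical
+ * ei_local private pointer:
+ *
+ *	clk_prepare_enable(ei_local->clks[MTK_CLK_ETHIF]);
+ */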
+
+struct END_DEVICE {
+	struct device *dev;
+	unsigned int tx_cpu_owner_idx0;
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
+	unsigned int rx_calc_idx[MAX_RX_RING_NUM];
+#endif
+	unsigned int tx_ring_full;
+	unsigned int tx_full;	/* NOTE(Nelson): unused, can remove */
+
+	/* PDMA TX  PTR */
+	dma_addr_t phy_tx_ring0;
+
+	/* QDMA TX  PTR */
+	struct platform_device *qdma_pdev;
+	/* struct sk_buff *free_skb[NUM_TX_DESC]; */
+	struct sk_buff **free_skb;
+	unsigned int tx_dma_ptr;
+	unsigned int tx_cpu_ptr;
+	unsigned int tx_cpu_idx;
+	unsigned int rls_cpu_idx;
+	/* atomic_t  free_txd_num[TOTAL_TXQ_NUM]; */
+	atomic_t  *free_txd_num;
+	/* unsigned int free_txd_head[TOTAL_TXQ_NUM]; */
+	/* unsigned int free_txd_tail[TOTAL_TXQ_NUM]; */
+	unsigned int *free_txd_head;
+	unsigned int *free_txd_tail;
+	struct QDMA_txdesc *txd_pool;
+	dma_addr_t phy_txd_pool;
+	/* unsigned int txd_pool_info[NUM_TX_DESC]; */
+	unsigned int *txd_pool_info;
+	struct QDMA_txdesc *free_head;
+	unsigned int phy_free_head;
+	unsigned int *free_page_head;
+	dma_addr_t phy_free_page_head;
+	struct PDMA_rxdesc *qrx_ring;
+	dma_addr_t phy_qrx_ring;
+
+	/* TSO */
+	unsigned int skb_txd_num;
+
+	/* MT7623 workaround */
+	struct work_struct reset_task;
+
+	/* workqueue_bh */
+	struct work_struct rx_wq;
+
+	/* tasklet_bh */
+	struct tasklet_struct rx_tasklet;
+
+	/* struct sk_buff *skb_free[NUM_TX_DESC]; */
+	struct sk_buff **skb_free;
+	unsigned int free_idx;
+
+	struct net_device_stats stat;	/* The new statistics table. */
+	spinlock_t page_lock;	/* spinlock for CR access critical section */
+	spinlock_t irq_lock;	/* spinlock for ISR critical section */
+	spinlock_t mdio_lock;   /* spin_lock for mdio reg access */
+	struct PDMA_txdesc *tx_ring0;
+	struct PDMA_rxdesc *rx_ring[MAX_RX_RING_NUM];
+	dma_addr_t phy_rx_ring[MAX_RX_RING_NUM];
+
+	/* void *netrx_skb_data[MAX_RX_RING_NUM][NUM_RX_DESC]; */
+	void **netrx_skb_data[MAX_RX_RING_NUM];
+
+	/* struct sk_buff *netrx0_skbuf[NUM_RX_DESC]; */
+	/*struct sk_buff **netrx0_skbuf;*/
+	void **netrx0_skb_data;
+	/* napi */
+	struct napi_struct napi;
+	struct napi_struct napi_rx;
+	struct napi_struct napi_rx_rss0;
+	struct napi_struct napi_rx_rss1;
+	struct napi_struct napi_rx_rss2;
+	struct napi_struct napi_rx_rss3;
+	struct napi_struct napi_tx;
+	struct net_device dummy_dev;
+
+	/* clock control */
+	struct clk	*clks[MTK_CLK_MAX];
+
+	/* gsw device node */
+	struct device_node *switch_np;
+
+	/* GE1 support */
+	struct net_device *netdev;
+	/* GE2 support */
+	struct net_device *pseudo_dev;
+	unsigned int is_pseudo;
+
+	struct mii_if_info mii_info;
+	struct lro_counters lro_counters;
+	struct net_lro_mgr lro_mgr;
+	struct net_lro_desc lro_arr[8];
+	struct vlan_group *vlgrp;
+
+	/* virtual base addr from device tree */
+	void __iomem *ethdma_sysctl_base;
+
+	unsigned int irq0;
+	unsigned int irq1;
+	unsigned int irq2;
+	unsigned int irq3;
+	unsigned int esw_irq;
+	void __iomem *fe_tx_int_status;
+	void __iomem *fe_tx_int_enable;
+	void __iomem *fe_rx_int_status;
+	void __iomem *fe_rx_int_enable;
+
+	unsigned int features;
+	unsigned int chip_name;
+	unsigned int architecture;
+
+	/* IP address */
+	char lan_ip4_addr[IP4_ADDR_LEN];
+
+	/* Function pointers */
+	int (*ei_start_xmit)(struct sk_buff *skb, struct net_device *netdev,
+			     int gmac_no);
+	int (*ei_xmit_housekeeping)(struct net_device *netdev, int budget);
+	int (*ei_eth_recv)(struct net_device *dev,
+			   struct napi_struct *napi,
+			   int budget);
+	int (*ei_eth_recv_rss0)(struct net_device *dev,
+				struct napi_struct *napi,
+				int budget);
+	int (*ei_eth_recv_rss1)(struct net_device *dev,
+				struct napi_struct *napi,
+				int budget);
+	int (*ei_eth_recv_rss2)(struct net_device *dev,
+				struct napi_struct *napi,
+				int budget);
+	int (*ei_eth_recv_rss3)(struct net_device *dev,
+				struct napi_struct *napi,
+				int budget);
+	int (*ei_fill_tx_desc)(struct net_device *dev,
+			       unsigned long *tx_cpu_owner_idx,
+			       struct sk_buff *skb, int gmac_no);
+
+	/* MT7623 PSE reset workaround */
+	struct task_struct *kreset_task;
+	struct task_struct *kphy_poll_task;
+	unsigned int fe_reset_times;
+	unsigned int tx_mask;
+	unsigned int rx_mask;
+	unsigned int *rls_cnt;
+};
+
+#ifdef CONFIG_INET_LRO
+static inline void ei_lro_flush_all(struct net_lro_mgr *lro_mgr)
+{
+	lro_flush_all(lro_mgr);
+}
+#else
+static inline void ei_lro_flush_all(struct net_lro_mgr *lro_mgr)
+{
+}
+#endif
+
+struct net_device_stats *ra_get_stats(struct net_device *dev);
+
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+
+int ra2882eth_init(void);
+void ra2882eth_cleanup_module(void);
+
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);
+u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr);
+u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr,
+		      u32 *read_data);
+u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr,
+		       u32 write_data);
+
+/* HNAT functions */
+#if defined(CONFIG_RA_NAT_NONE)
+static int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
+static int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
+#else
+extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
+#endif
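+
+/* Illustrative sketch (not driver code): a hypothetical RX-path call
+ * pattern for the hook above, assuming a zero return means the skb was
+ * consumed by HW NAT:
+ *
+ *	if (!ra_sw_nat_hook_rx || ra_sw_nat_hook_rx(skb))
+ *		netif_receive_skb(skb);
+ */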
+
+/* PDMA functions */
+int fe_pdma_wait_dma_idle(void);
+int fe_pdma_rx_dma_init(struct net_device *dev);
+int fe_pdma_tx_dma_init(struct net_device *dev);
+void fe_pdma_rx_dma_deinit(struct net_device *dev);
+void fe_pdma_tx_dma_deinit(struct net_device *dev);
+void set_fe_pdma_glo_cfg(void);
+int ei_pdma_start_xmit(struct sk_buff *skb, struct net_device *dev,
+		       int gmac_no);
+int ei_pdma_xmit_housekeeping(struct net_device *netdev,
+			      int budget);
+int fe_fill_tx_desc(struct net_device *dev,
+		    unsigned long *tx_cpu_owner_idx,
+		    struct sk_buff *skb,
+		    int gmac_no);
+int fe_fill_tx_desc_tso(struct net_device *dev,
+			unsigned long *tx_cpu_owner_idx,
+			struct sk_buff *skb,
+			int gmac_no);
+void fe_set_sw_lro_my_ip(char *lan_ip_addr);
+
+/* QDMA functions */
+int fe_qdma_wait_dma_idle(void);
+int fe_qdma_rx_dma_init(struct net_device *dev);
+int fe_qdma_tx_dma_init(struct net_device *dev);
+void fe_qdma_rx_dma_deinit(struct net_device *dev);
+void fe_qdma_tx_dma_deinit(struct net_device *dev);
+void set_fe_qdma_glo_cfg(void);
+int ei_qdma_start_xmit(struct sk_buff *skb, struct net_device *dev,
+		       int gmac_no);
+int ei_qdma_xmit_housekeeping(struct net_device *netdev, int budget);
+int ei_qdma_ioctl(struct net_device *dev, struct ifreq *ifr,
+		  struct qdma_ioctl_data *ioctl_data);
+int ephy_ioctl(struct net_device *dev, struct ifreq *ifr,
+	       struct ephy_ioctl_data *ioctl_data);
+/* HW LRO functions */
+int fe_hw_lro_init(struct net_device *dev);
+void fe_hw_lro_deinit(struct net_device *dev);
+int fe_hw_lro_recv(struct net_device *dev,
+		   struct napi_struct *napi,
+		   int budget);
+void fe_set_hw_lro_my_ip(char *lan_ip_addr);
+
+int fe_rss_4ring_init(struct net_device *dev);
+void fe_rss_4ring_deinit(struct net_device *dev);
+int fe_rss_2ring_init(struct net_device *dev);
+void fe_rss_2ring_deinit(struct net_device *dev);
+int fe_rss0_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget);
+int fe_rss1_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget);
+int fe_rss2_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget);
+int fe_rss3_recv(struct net_device *dev,
+		 struct napi_struct *napi,
+		 int budget);
+
+static inline void *raeth_alloc_skb_data(size_t size, gfp_t flags)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+	return kmalloc(size, flags);
+#else
+	return netdev_alloc_frag(size);
+#endif
+}
+
+static inline void raeth_free_skb_data(void *addr)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+	kfree(addr);
+#else
+	skb_free_frag(addr);
+#endif
+}
+
+static inline struct sk_buff *raeth_build_skb(void *data,
+					      unsigned int frag_size)
+{
+#ifdef CONFIG_ETH_SLAB_ALLOC_SKB
+	return build_skb(data, 0);
+#else
+	return build_skb(data, frag_size);
+#endif
+}
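+
+/* Illustrative sketch (not driver code): the three helpers above pair
+ * as alloc -> build -> free-on-error, independent of whether the slab
+ * or page-fragment backend is configured:
+ *
+ *	void *data = raeth_alloc_skb_data(size, GFP_ATOMIC);
+ *	struct sk_buff *skb = data ? raeth_build_skb(data, size) : NULL;
+ *
+ *	if (data && !skb)
+ *		raeth_free_skb_data(data);
+ */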
+
+extern u32 gmac1_txq_num;
+extern u32 gmac1_txq_txd_num;
+extern u32 gmac1_txd_num;
+extern u32 gmac2_txq_num;
+extern u32 gmac2_txq_txd_num;
+extern u32 gmac2_txd_num;
+extern u32 num_rx_desc;
+extern u32 num_tx_max_process;
+extern u32 num_tx_desc;
+extern u32 total_txq_num;
+extern u32 mac_to_gigaphy_mode_addr;
+extern u32 mac_to_gigaphy_mode_addr2;
+#endif	/* RA2882ETHEND_H */
diff --git a/src/kernel/modules/netsys_driver/nat/include/raether_hwlro.h b/src/kernel/modules/netsys_driver/nat/include/raether_hwlro.h
new file mode 100755
index 0000000..c319aca
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/raether_hwlro.h
@@ -0,0 +1,403 @@
+/* Copyright  2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_HWLRO_H
+#define RA_HWLRO_H
+
+#include "raeth_reg.h"
+
+#define	HW_LRO_TIMER_UNIT   1
+#define	HW_LRO_REFRESH_TIME 50000
+#define	HW_LRO_MAX_AGG_CNT	64
+#define	HW_LRO_AGG_DELTA	1
+#define	MAX_LRO_RX_LENGTH	(PAGE_SIZE * 3)
+#define	HW_LRO_AGG_TIME		10	/* 200us */
+#define	HW_LRO_AGE_TIME		50	/* 1ms */
+#define	HW_LRO_BW_THRE	        3000
+#define	HW_LRO_REPLACE_DELTA    1000
+#define	HW_LRO_SDL_REMAIN_ROOM	1522
+
+struct PDMA_LRO_AUTO_TLB_INFO0_T {
+	unsigned int DTP:16;
+	unsigned int STP:16;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO1_T {
+	unsigned int SIP0:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO2_T {
+	unsigned int SIP1:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO3_T {
+	unsigned int SIP2:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO4_T {
+	unsigned int SIP3:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO5_T {
+	unsigned int VLAN_VID0:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO6_T {
+	unsigned int VLAN_VID1:16;
+	unsigned int VLAN_VID_VLD:4;
+	unsigned int CNT:12;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO7_T {
+	unsigned int DW_LEN:32;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO8_T {
+	unsigned int DIP_ID:2;
+	unsigned int IPV6:1;
+	unsigned int IPV4:1;
+	unsigned int RESV:27;
+	unsigned int VALID:1;
+};
+
+struct PDMA_LRO_AUTO_TLB_INFO {
+	struct PDMA_LRO_AUTO_TLB_INFO0_T auto_tlb_info0;
+	struct PDMA_LRO_AUTO_TLB_INFO1_T auto_tlb_info1;
+	struct PDMA_LRO_AUTO_TLB_INFO2_T auto_tlb_info2;
+	struct PDMA_LRO_AUTO_TLB_INFO3_T auto_tlb_info3;
+	struct PDMA_LRO_AUTO_TLB_INFO4_T auto_tlb_info4;
+	struct PDMA_LRO_AUTO_TLB_INFO5_T auto_tlb_info5;
+	struct PDMA_LRO_AUTO_TLB_INFO6_T auto_tlb_info6;
+	struct PDMA_LRO_AUTO_TLB_INFO7_T auto_tlb_info7;
+	struct PDMA_LRO_AUTO_TLB_INFO8_T auto_tlb_info8;
+};
+
+#define PDMA_LRO_EN             BIT(0)
+#define PDMA_LRO_IPV6_EN        BIT(1)
+#define PDMA_LRO_CRSN_BNW       BIT(6)
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN    BIT(7)
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN	BIT(23)
+#define PDMA_LRO_RXD_PREFETCH_EN        BITS(3, 4)
+#define PDMA_NON_LRO_MULTI_EN   BIT(2)
+#define PDMA_LRO_DLY_INT_EN             BIT(5)
+#define PDMA_LRO_FUSH_REQ               BITS(26, 28)
+#define PDMA_LRO_RELINGUISH     BITS(29, 31)
+#define PDMA_LRO_FREQ_PRI_ADJ   BITS(16, 19)
+#define PDMA_LRO_TPUT_PRE_ADJ           BITS(8, 11)
+#define PDMA_LRO_TPUT_PRI_ADJ           BITS(12, 15)
+#define PDMA_LRO_ALT_SCORE_MODE         BIT(21)
+#define PDMA_LRO_RING_AGE1      BITS(22, 31)
+#define PDMA_LRO_RING_AGE2      BITS(0, 5)
+#define PDMA_LRO_RING_AGG               BITS(10, 25)
+#define PDMA_LRO_RING_AGG_CNT1          BITS(26, 31)
+#define PDMA_LRO_RING_AGG_CNT2          BITS(0, 1)
+#define PDMA_LRO_ALT_TICK_TIMER         BITS(16, 20)
+#define PDMA_LRO_LRO_MIN_RXD_SDL0       BITS(16, 31)
+
+#define PDMA_LRO_DLY_INT_EN_OFFSET          (5)
+#define PDMA_LRO_TPUT_PRE_ADJ_OFFSET        (8)
+#define PDMA_LRO_FREQ_PRI_ADJ_OFFSET    (16)
+#define PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET    (16)
+#define PDMA_LRO_TPUT_PRI_ADJ_OFFSET        (12)
+#define PDMA_LRO_ALT_SCORE_MODE_OFFSET      (21)
+#define PDMA_LRO_FUSH_REQ_OFFSET            (26)
+#define PDMA_NON_LRO_MULTI_EN_OFFSET        (2)
+#define PDMA_LRO_IPV6_EN_OFFSET             (1)
+#define PDMA_LRO_RXD_PREFETCH_EN_OFFSET     (3)
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET (7)
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET   (23)
+#define PDMA_LRO_ALT_TICK_TIMER_OFFSET      (16)
+
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ  BITS(12, 31)
+#define PDMA_LRO_CNT_OVERFLOW_ADJ   BITS(0, 11)
+
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET   (12)
+#define PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET    (0)
+
+#define PDMA_LRO_ALT_BYTE_CNT_MODE  (0)
+#define PDMA_LRO_ALT_PKT_CNT_MODE   (1)
+
+/* LRO_RX_RING1_CTRL_DW1 offsets  */
+#define PDMA_LRO_AGE_H_OFFSET           (10)
+#define PDMA_LRO_RING_AGE1_OFFSET       (22)
+#define PDMA_LRO_RING_AGG_CNT1_OFFSET   (26)
+/* LRO_RX_RING1_CTRL_DW2 offsets  */
+#define PDMA_RX_MODE_OFFSET             (6)
+#define PDMA_RX_PORT_VALID_OFFSET       (8)
+#define PDMA_RX_MYIP_VALID_OFFSET       (9)
+#define PDMA_LRO_RING_AGE2_OFFSET       (0)
+#define PDMA_LRO_RING_AGG_OFFSET        (10)
+#define PDMA_LRO_RING_AGG_CNT2_OFFSET   (0)
+/* LRO_RX_RING1_CTRL_DW3 offsets  */
+#define PDMA_LRO_AGG_CNT_H_OFFSET       (6)
+/* LRO_RX_RING1_STP_DTP_DW offsets */
+#define PDMA_RX_TCP_SRC_PORT_OFFSET     (16)
+#define PDMA_RX_TCP_DEST_PORT_OFFSET    (0)
+/* LRO_RX_RING1_CTRL_DW0 offsets */
+#define PDMA_RX_IPV4_FORCE_OFFSET       (1)
+#define PDMA_RX_IPV6_FORCE_OFFSET       (0)
+
+#define ADMA_MULTI_RXD_PREFETCH_EN	BIT(3)
+#define ADMA_RXD_PREFETCH_EN		BIT(4)
+
+#define SET_PDMA_LRO_MAX_AGG_CNT(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW3); \
+reg_val &= ~0xff;   \
+reg_val |= ((x) & 0xff);  \
+sys_reg_write(ADMA_LRO_CTRL_DW3, reg_val); \
+}
+
+#define SET_PDMA_LRO_FLUSH_REQ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_FUSH_REQ;   \
+reg_val |= ((x) & 0x7) << PDMA_LRO_FUSH_REQ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV6_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV6_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV6_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_RXD_PREFETCH_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_RXD_PREFETCH_EN;   \
+reg_val |= (x);  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV4_CSUM_UPDATE_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_IPV4_CTRL_PUSH_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_NON_LRO_MULTI_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~(PDMA_NON_LRO_MULTI_EN);   \
+reg_val |= ((x) & 0x1) << PDMA_NON_LRO_MULTI_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_FREQ_PRI_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_FREQ_PRI_ADJ;   \
+reg_val |= ((x) & 0xf) << PDMA_LRO_FREQ_PRI_ADJ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_PRE_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_TPUT_PRE_ADJ;   \
+reg_val |= ((x) & 0xf) << PDMA_LRO_TPUT_PRE_ADJ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_PRI_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_TPUT_PRI_ADJ;   \
+reg_val |= ((x) & 0xf) << PDMA_LRO_TPUT_PRI_ADJ_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_SCORE_MODE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_ALT_SCORE_MODE;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_ALT_SCORE_MODE_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_DLY_INT_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW0); \
+reg_val &= ~PDMA_LRO_DLY_INT_EN;   \
+reg_val |= ((x) & 0x1) << PDMA_LRO_DLY_INT_EN_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW0, reg_val); \
+}
+
+#define SET_PDMA_LRO_BW_THRESHOLD(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW2); \
+reg_val = (x);  \
+sys_reg_write(ADMA_LRO_CTRL_DW2, reg_val); \
+}
+
+#define SET_PDMA_LRO_MIN_RXD_SDL(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_LRO_CTRL_DW3); \
+reg_val &= ~PDMA_LRO_LRO_MIN_RXD_SDL0;   \
+reg_val |= ((x) & 0xffff) << PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET;  \
+sys_reg_write(ADMA_LRO_CTRL_DW3, reg_val); \
+}
+
+#define SET_PDMA_LRO_TPUT_OVERFLOW_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(PDMA_LRO_ATL_OVERFLOW_ADJ); \
+reg_val &= ~PDMA_LRO_TPUT_OVERFLOW_ADJ;   \
+reg_val |= ((x) & 0xfffff) << PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET;  \
+sys_reg_write(PDMA_LRO_ATL_OVERFLOW_ADJ, reg_val); \
+}
+
+#define SET_PDMA_LRO_CNT_OVERFLOW_ADJ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(PDMA_LRO_ATL_OVERFLOW_ADJ); \
+reg_val &= ~PDMA_LRO_CNT_OVERFLOW_ADJ;   \
+reg_val |= ((x) & 0xfff) << PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET;  \
+sys_reg_write(PDMA_LRO_ATL_OVERFLOW_ADJ, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_ALT_REFRESH_TIMER); \
+reg_val &= ~PDMA_LRO_ALT_TICK_TIMER;   \
+reg_val |= ((x) & 0x1f) << PDMA_LRO_ALT_TICK_TIMER_OFFSET;  \
+sys_reg_write(LRO_ALT_REFRESH_TIMER, reg_val); \
+}
+
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_ALT_REFRESH_TIMER); \
+reg_val &= ~0xffff;   \
+reg_val |= ((x) & 0xffff);  \
+sys_reg_write(LRO_ALT_REFRESH_TIMER, reg_val); \
+}
+
+#define SET_PDMA_LRO_MAX_AGG_TIME(x) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_MAX_AGG_TIME); \
+reg_val &= ~0xffff;   \
+reg_val |= ((x) & 0xffff);  \
+sys_reg_write(LRO_MAX_AGG_TIME, reg_val); \
+}
+
+#define SET_PDMA_RXRING_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x3 << PDMA_RX_MODE_OFFSET);   \
+reg_val |= ((y) & 0x3) << PDMA_RX_MODE_OFFSET;  \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_MYIP_VALID(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_MYIP_VALID_OFFSET); \
+reg_val |= ((y) & 0x1) << PDMA_RX_MYIP_VALID_OFFSET;    \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_VALID(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_PORT_VALID_OFFSET); \
+reg_val |= ((y) & 0x1) << PDMA_RX_PORT_VALID_OFFSET;    \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_TCP_SRC_PORT(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_STP_DTP_DW + \
+				    (((x) - 1) << 6)); \
+reg_val &= ~(0xffff << PDMA_RX_TCP_SRC_PORT_OFFSET);    \
+reg_val |= ((y) & 0xffff) << PDMA_RX_TCP_SRC_PORT_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_STP_DTP_DW + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_TCP_DEST_PORT(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_STP_DTP_DW + \
+				    (((x) - 1) << 6)); \
+reg_val &= ~(0xffff << PDMA_RX_TCP_DEST_PORT_OFFSET);    \
+reg_val |= ((y) & 0xffff) << PDMA_RX_TCP_DEST_PORT_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_STP_DTP_DW + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_IPV4_FORCE_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_IPV4_FORCE_OFFSET);    \
+reg_val |= ((y) & 0x1) << PDMA_RX_IPV4_FORCE_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6), reg_val); \
+}
+
+#define SET_PDMA_RXRING_IPV6_FORCE_MODE(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6)); \
+reg_val &= ~(0x1 << PDMA_RX_IPV6_FORCE_OFFSET);    \
+reg_val |= ((y) & 0x1) << PDMA_RX_IPV6_FORCE_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_CTRL_DW0 + (((x) - 1) << 6), reg_val); \
+}
+
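+/* The ring age time is split across two registers: the low 10 bits
+ * (AGE1) live in CTRL_DW1 and the next 6 bits (AGE2) in CTRL_DW2,
+ * with the high part extracted at PDMA_LRO_AGE_H_OFFSET. */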
+#define SET_PDMA_RXRING_AGE_TIME(x, y) \
+{ \
+unsigned int reg_val1 = sys_reg_read(LRO_RX_RING0_CTRL_DW1 + ((x) << 6)); \
+unsigned int reg_val2 = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val1 &= ~PDMA_LRO_RING_AGE1;    \
+reg_val2 &= ~PDMA_LRO_RING_AGE2;    \
+reg_val1 |= ((y) & 0x3ff) << PDMA_LRO_RING_AGE1_OFFSET;    \
+reg_val2 |= (((y) >> PDMA_LRO_AGE_H_OFFSET) & 0x03f) << \
+	    PDMA_LRO_RING_AGE2_OFFSET;\
+sys_reg_write(LRO_RX_RING0_CTRL_DW1 + ((x) << 6), reg_val1); \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val2); \
+}
+
+#define SET_PDMA_RXRING_AGG_TIME(x, y) \
+{ \
+unsigned int reg_val = sys_reg_read(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
+reg_val &= ~PDMA_LRO_RING_AGG;    \
+reg_val |= ((y) & 0xffff) << PDMA_LRO_RING_AGG_OFFSET;    \
+sys_reg_write(LRO_RX_RING0_CTRL_DW2 + ((x) << 6), reg_val); \
+}
+
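+/* The max aggregation count is split the same way: the low 6 bits
+ * go in RING1_CTRL_DW2 and the top 2 bits in RING1_CTRL_DW3. */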
+#define SET_PDMA_RXRING_MAX_AGG_CNT(x, y) \
+{ \
+unsigned int reg_val1 = sys_reg_read(LRO_RX_RING1_CTRL_DW2 + \
+				     (((x) - 1) << 6)); \
+unsigned int reg_val2 = sys_reg_read(LRO_RX_RING1_CTRL_DW3 + \
+				     (((x) - 1) << 6)); \
+reg_val1 &= ~PDMA_LRO_RING_AGG_CNT1;    \
+reg_val2 &= ~PDMA_LRO_RING_AGG_CNT2;    \
+reg_val1 |= ((y) & 0x3f) << PDMA_LRO_RING_AGG_CNT1_OFFSET;    \
+reg_val2 |= (((y) >> PDMA_LRO_AGG_CNT_H_OFFSET) & 0x03) << \
+	     PDMA_LRO_RING_AGG_CNT2_OFFSET;    \
+sys_reg_write(LRO_RX_RING1_CTRL_DW2 + (((x) - 1) << 6), reg_val1); \
+sys_reg_write(LRO_RX_RING1_CTRL_DW3 + (((x) - 1) << 6), reg_val2); \
+}
+
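+/* Illustrative bring-up of one LRO ring using the macros above.
+ * This is a sketch only: the ring index, mode value and tuning
+ * numbers are assumptions for illustration, not values taken from
+ * a datasheet.
+ *
+ *   SET_PDMA_RXRING_MODE(1, 0x2);          // mode value assumed
+ *   SET_PDMA_RXRING_MYIP_VALID(1, 1);
+ *   SET_PDMA_RXRING_AGE_TIME(1, 10);       // tuning value assumed
+ *   SET_PDMA_RXRING_AGG_TIME(1, 26);       // tuning value assumed
+ *   SET_PDMA_RXRING_MAX_AGG_CNT(1, 32);    // tuning value assumed
+ *   SET_PDMA_RXRING_VALID(1, 1);           // enable the ring last
+ */
+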
+/* HW LRO debug functions */
+void hw_lro_stats_update(unsigned int ring_num,
+			 struct PDMA_rxdesc *rx_ring);
+void hw_lro_flush_stats_update(unsigned int ring_num,
+			       struct PDMA_rxdesc *rx_ring);
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/raether_qdma.h b/src/kernel/modules/netsys_driver/nat/include/raether_qdma.h
new file mode 100755
index 0000000..ce1af4d
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/raether_qdma.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016 MediaTek Inc.
+ * Author: Carlos Huang <carlos.huang@mediatek.com>
+ * Author: Harry Huang <harry.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RAETHER_QDMA_H
+#define RAETHER_QDMA_H
+
+extern struct net_device *dev_raether;
+void set_fe_dma_glo_cfg(void);
+
+#endif
diff --git a/src/kernel/modules/netsys_driver/nat/include/raether_rss.h b/src/kernel/modules/netsys_driver/nat/include/raether_rss.h
new file mode 100755
index 0000000..07c073f
--- /dev/null
+++ b/src/kernel/modules/netsys_driver/nat/include/raether_rss.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2016 MediaTek Inc.
+ * Author: Nelson Chang <nelson.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef RA_RSS_H
+#define RA_RSS_H
+
+#include "raeth_reg.h"
+
+#define NUM_RSS_RX_DESC   1024
+#define MAX_RX_RING_NUM_2RING 2
+
+/****** RSS defines ******/
+#define PDMA_RSS_EN             BIT(0)
+#define PDMA_RSS_BUSY		BIT(1)
+#define PDMA_RSS_CFG_REQ	BIT(2)
+#define PDMA_RSS_CFG_RDY	BIT(3)
+#define PDMA_RSS_INDR_TBL_SIZE		BITS(4, 6)
+#define PDMA_RSS_IPV6_TYPE		BITS(8, 10)
+#define PDMA_RSS_IPV4_TYPE		BITS(12, 14)
+#define PDMA_RSS_IPV6_TUPLE_EN		BITS(16, 20)
+#define PDMA_RSS_IPV4_TUPLE_EN		BITS(24, 28)
+
+#define PDMA_RSS_EN_OFFSET        (0)
+#define PDMA_RSS_BUSY_OFFSET      (1)
+#define PDMA_RSS_CFG_REQ_OFFSET	  (2)
+#define PDMA_RSS_CFG_RDY_OFFSET	  (3)
+#define PDMA_RSS_INDR_TBL_SIZE_OFFSET	(4)
+#define PDMA_RSS_IPV6_TYPE_OFFSET	(8)
+#define PDMA_RSS_IPV4_TYPE_OFFSET	(12)
+#define PDMA_RSS_IPV6_TUPLE_EN_OFFSET	(16)
+#define PDMA_RSS_IPV4_TUPLE_EN_OFFSET	(24)
+
+#define SET_PDMA_RSS_EN(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_EN);   \
+reg_val |= ((x) & 0x1) << PDMA_RSS_EN_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_CFG_REQ(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_CFG_REQ);   \
+reg_val |= ((x) & 0x1) << PDMA_RSS_CFG_REQ_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV4_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV4_TYPE);   \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV4_TYPE_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV6_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV6_TYPE);   \
+reg_val |= ((x) & 0x7) << PDMA_RSS_IPV6_TYPE_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV4_TUPLE_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV4_TUPLE_EN);   \
+reg_val |= ((x) & 0x1f) << PDMA_RSS_IPV4_TUPLE_EN_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_IPV6_TUPLE_TYPE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_IPV6_TUPLE_EN);   \
+reg_val |= ((x) & 0x1f) << PDMA_RSS_IPV6_TUPLE_EN_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_INDR_TBL_SIZE(x) \
+{ \
+unsigned int reg_val = sys_reg_read(ADMA_RSS_GLO_CFG); \
+reg_val &= ~(PDMA_RSS_INDR_TBL_SIZE);   \
+reg_val |= ((x) & 0x7) << PDMA_RSS_INDR_TBL_SIZE_OFFSET;  \
+sys_reg_write(ADMA_RSS_GLO_CFG, reg_val); \
+}
+
+#define SET_PDMA_RSS_CR_VALUE(x, y) \
+{ \
+sys_reg_write((x), (y)); \
+}
+
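+/* Illustrative RSS bring-up using the macros above. This is a
+ * sketch only: the request/ready handshake ordering and the field
+ * values are assumptions inferred from the CFG_REQ/CFG_RDY bits,
+ * not a documented sequence.
+ *
+ *   SET_PDMA_RSS_CFG_REQ(1);
+ *   while (!(sys_reg_read(ADMA_RSS_GLO_CFG) & PDMA_RSS_CFG_RDY))
+ *           ;                               // wait for hardware ack
+ *   SET_PDMA_RSS_INDR_TBL_SIZE(2);          // table-size code assumed
+ *   SET_PDMA_RSS_IPV4_TUPLE_TYPE(0x7);      // tuple selection assumed
+ *   SET_PDMA_RSS_IPV6_TUPLE_TYPE(0x7);      // tuple selection assumed
+ *   SET_PDMA_RSS_CFG_REQ(0);
+ *   SET_PDMA_RSS_EN(1);
+ */
+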
+#endif