[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.11_CAP.15.11(SDK4.6)diff_16.08(SDK4.7)

Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No

Change-Id: I7a7c42775e2ffdd23aaec4fff782adcc99d7890b
diff --git a/upstream/linux-5.10/net/8021q/vlan_dev.c b/upstream/linux-5.10/net/8021q/vlan_dev.c
new file mode 100755
index 0000000..86a1c99
--- /dev/null
+++ b/upstream/linux-5.10/net/8021q/vlan_dev.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*-
+ * INET		802.1Q VLAN
+ *		Ethernet-type device handling.
+ *
+ * Authors:	Ben Greear <greearb@candelatech.com>
+ *              Please send support related email to: netdev@vger.kernel.org
+ *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *
+ * Fixes:       Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
+ *                - reset skb->pkt_type on incoming packets when MAC was changed
+ *                - see that changed MAC is saddr for outgoing packets
+ *              Oct 20, 2001:  Ard van Breeman:
+ *                - Fix MC-list, finally.
+ *                - Flush MC-list on VLAN destroy.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <net/arp.h>
+
+#include "vlan.h"
+#include "vlanproc.h"
+#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
+
+/*
+ *	Create the VLAN header for an arbitrary protocol layer
+ *
+ *	saddr=NULL	means use device source address
+ *	daddr=NULL	means leave destination address (eg unresolved arp)
+ *
+ *  This is called when the SKB is moving down the stack towards the
+ *  physical devices.
+ */
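+/*
+ * Illustrative layout (a sketch, not code from this change): for vlan_id
+ * 100 carrying an ETH_P_IP payload without REORDER_HDR, the wire frame
+ * becomes
+ *
+ *   dst MAC | src MAC | 0x8100 | TCI (PCP/DEI/VID=100) | 0x0800 | payload
+ *
+ * where the real device's hard_header writes the MACs and the 0x8100
+ * ethertype, and the vlan_hdr pushed below supplies the TCI and the
+ * encapsulated 0x0800.
+ */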
+static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+				unsigned short type,
+				const void *daddr, const void *saddr,
+				unsigned int len)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct vlan_hdr *vhdr;
+	unsigned int vhdrlen = 0;
+	u16 vlan_tci = 0;
+	int rc;
+
+	if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
+		vhdr = skb_push(skb, VLAN_HLEN);
+
+		vlan_tci = vlan->vlan_id;
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+		vhdr->h_vlan_TCI = htons(vlan_tci);
+
+		/*
+		 *  Set the protocol type. For a packet of type ETH_P_802_3/2 we
+		 *  put the length in here instead.
+		 */
+		if (type != ETH_P_802_3 && type != ETH_P_802_2)
+			vhdr->h_vlan_encapsulated_proto = htons(type);
+		else
+			vhdr->h_vlan_encapsulated_proto = htons(len);
+
+		skb->protocol = vlan->vlan_proto;
+		type = ntohs(vlan->vlan_proto);
+		vhdrlen = VLAN_HLEN;
+	}
+
+	/* Before delegating work to the lower layer, enter our MAC-address */
+	if (saddr == NULL)
+		saddr = dev->dev_addr;
+
+	/* Now make the underlying real hard header */
+	dev = vlan->real_dev;
+	rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
+	if (rc > 0)
+		rc += vhdrlen;
+	return rc;
+}
+
+static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	return netpoll_send_skb(vlan->netpoll, skb);
+#else
+	BUG();
+	return NETDEV_TX_OK;
+#endif
+}
+
+static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+	unsigned int len;
+	int ret;
+
+	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
+	 *
+	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
+	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
+	 */
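+	/* Example: a DHCP client transmitting straight on eth0.100 hands
+	 * down an untagged 0x0800 frame, so h_vlan_proto != vlan_proto and
+	 * the tag is attached via the hwaccel metadata below instead.
+	 */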
+	if (veth->h_vlan_proto != vlan->vlan_proto ||
+	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
+		u16 vlan_tci;
+		vlan_tci = vlan->vlan_id;
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+		__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
+	}
+
+	skb->dev = vlan->real_dev;
+	len = skb->len;
+	if (unlikely(netpoll_tx_running(dev)))
+		return vlan_netpoll_send_skb(vlan, skb);
+
+	ret = dev_queue_xmit(skb);
+
+	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+		struct vlan_pcpu_stats *stats;
+
+		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes += len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
+	}
+
+	return ret;
+}
+
+static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	unsigned int max_mtu = real_dev->mtu;
+
+	if (netif_reduces_vlan_mtu(real_dev))
+		max_mtu -= VLAN_HLEN;
+	if (max_mtu < new_mtu)
+		return -ERANGE;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
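+/* Example: with a plain Ethernet real_dev of mtu 1500, any new_mtu up to
+ * 1500 is accepted; only real devices flagged by netif_reduces_vlan_mtu()
+ * (MACsec in this kernel) cap it at 1500 - VLAN_HLEN = 1496.
+ */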
+
+void vlan_dev_set_ingress_priority(const struct net_device *dev,
+				   u32 skb_prio, u16 vlan_prio)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
+		vlan->nr_ingress_mappings--;
+	else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
+		vlan->nr_ingress_mappings++;
+
+	vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
+}
+
+int vlan_dev_set_egress_priority(const struct net_device *dev,
+				 u32 skb_prio, u16 vlan_prio)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct vlan_priority_tci_mapping *mp = NULL;
+	struct vlan_priority_tci_mapping *np;
+	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+
+	/* See if a priority mapping exists.. */
+	mp = vlan->egress_priority_map[skb_prio & 0xF];
+	while (mp) {
+		if (mp->priority == skb_prio) {
+			if (mp->vlan_qos && !vlan_qos)
+				vlan->nr_egress_mappings--;
+			else if (!mp->vlan_qos && vlan_qos)
+				vlan->nr_egress_mappings++;
+			mp->vlan_qos = vlan_qos;
+			return 0;
+		}
+		mp = mp->next;
+	}
+
+	/* Create a new mapping then. */
+	mp = vlan->egress_priority_map[skb_prio & 0xF];
+	np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
+	if (!np)
+		return -ENOBUFS;
+
+	np->next = mp;
+	np->priority = skb_prio;
+	np->vlan_qos = vlan_qos;
+	/* Before inserting this element in hash table, make sure all its fields
+	 * are committed to memory.
+	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+	 */
+	smp_wmb();
+	vlan->egress_priority_map[skb_prio & 0xF] = np;
+	if (vlan_qos)
+		vlan->nr_egress_mappings++;
+	return 0;
+}
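+/*
+ * Example: mapping skb->priority 0 to PCP 5 (assuming iproute2 syntax,
+ * "ip link add link eth0 name eth0.100 type vlan id 100 egress-qos-map
+ * 0:5") stores vlan_qos = 5 << VLAN_PRIO_SHIFT = 0xa000 here, which
+ * vlan_dev_get_egress_qos_mask() later ORs into the outgoing TCI.
+ */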
+
+/* Flags are defined in the vlan_flags enum in
+ * include/uapi/linux/if_vlan.h file.
+ */
+int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	u32 old_flags = vlan->flags;
+
+	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+		     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
+		     VLAN_FLAG_BRIDGE_BINDING))
+		return -EINVAL;
+
+	vlan->flags = (old_flags & ~mask) | (flags & mask);
+
+	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
+		if (vlan->flags & VLAN_FLAG_GVRP)
+			vlan_gvrp_request_join(dev);
+		else
+			vlan_gvrp_request_leave(dev);
+	}
+
+	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
+		if (vlan->flags & VLAN_FLAG_MVRP)
+			vlan_mvrp_request_join(dev);
+		else
+			vlan_mvrp_request_leave(dev);
+	}
+	return 0;
+}
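+/*
+ * Example: vlan_dev_change_flags(dev, VLAN_FLAG_GVRP, VLAN_FLAG_GVRP)
+ * enables GVRP and requests a join while the device is up;
+ * vlan_dev_change_flags(dev, 0, VLAN_FLAG_GVRP) clears it and requests a
+ * leave. Bits outside the mask are left untouched.
+ */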
+
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
+{
+	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
+}
+
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev)
+{
+	if (dev->addr_assign_type != NET_ADDR_STOLEN)
+		return false;
+
+	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return true;
+}
+
+static int vlan_dev_open(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+	int err;
+
+	if (!(real_dev->flags & IFF_UP) &&
+	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+		return -ENETDOWN;
+
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+	    !vlan_dev_inherit_address(dev, real_dev)) {
+		err = dev_uc_add(real_dev, dev->dev_addr);
+		if (err < 0)
+			goto out;
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		err = dev_set_allmulti(real_dev, 1);
+		if (err < 0)
+			goto del_unicast;
+	}
+	if (dev->flags & IFF_PROMISC) {
+		err = dev_set_promiscuity(real_dev, 1);
+		if (err < 0)
+			goto clear_allmulti;
+	}
+
+	ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);
+
+	if (vlan->flags & VLAN_FLAG_GVRP)
+		vlan_gvrp_request_join(dev);
+
+	if (vlan->flags & VLAN_FLAG_MVRP)
+		vlan_mvrp_request_join(dev);
+
+	if (netif_carrier_ok(real_dev) &&
+	    !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+		netif_carrier_on(dev);
+	return 0;
+
+clear_allmulti:
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(real_dev, -1);
+del_unicast:
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+		dev_uc_del(real_dev, dev->dev_addr);
+out:
+	netif_carrier_off(dev);
+	return err;
+}
+
+static int vlan_dev_stop(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	dev_mc_unsync(real_dev, dev);
+	dev_uc_unsync(real_dev, dev);
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(real_dev, -1);
+	if (dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(real_dev, -1);
+
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+		dev_uc_del(real_dev, dev->dev_addr);
+
+	if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+		netif_carrier_off(dev);
+	return 0;
+}
+
+static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	struct sockaddr *addr = p;
+	int err;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (!(dev->flags & IFF_UP))
+		goto out;
+
+	if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
+		err = dev_uc_add(real_dev, addr->sa_data);
+		if (err < 0)
+			return err;
+	}
+
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+		dev_uc_del(real_dev, dev->dev_addr);
+
+out:
+	ether_addr_copy(dev->dev_addr, addr->sa_data);
+	return 0;
+}
+
+static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	struct ifreq ifrr;
+	int err = -EOPNOTSUPP;
+
+	strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+	ifrr.ifr_ifru = ifr->ifr_ifru;
+
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		if (!net_eq(dev_net(dev), &init_net))
+			break;
+		fallthrough;
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+	case SIOCGHWTSTAMP:
+		if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+			err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+		break;
+	}
+
+	if (!err)
+		ifr->ifr_ifru = ifrr.ifr_ifru;
+
+	return err;
+}
+
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int err = 0;
+
+	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+		err = ops->ndo_neigh_setup(real_dev, pa);
+
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_FCOE)
+static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
+				   struct scatterlist *sgl, unsigned int sgc)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = 0;
+
+	if (ops->ndo_fcoe_ddp_setup)
+		rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
+
+	return rc;
+}
+
+static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int len = 0;
+
+	if (ops->ndo_fcoe_ddp_done)
+		len = ops->ndo_fcoe_ddp_done(real_dev, xid);
+
+	return len;
+}
+
+static int vlan_dev_fcoe_enable(struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_enable)
+		rc = ops->ndo_fcoe_enable(real_dev);
+	return rc;
+}
+
+static int vlan_dev_fcoe_disable(struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_disable)
+		rc = ops->ndo_fcoe_disable(real_dev);
+	return rc;
+}
+
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+				    struct scatterlist *sgl, unsigned int sgc)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = 0;
+
+	if (ops->ndo_fcoe_ddp_target)
+		rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+	return rc;
+}
+#endif
+
+#ifdef NETDEV_FCOE_WWNN
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_get_wwn)
+		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+	return rc;
+}
+#endif
+
+static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+		if (change & IFF_PROMISC)
+			dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+	}
+}
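+/* Example: "ip link set eth0.100 promisc on" ends up here with change ==
+ * IFF_PROMISC and bumps the real device's promiscuity count by one;
+ * switching it off drops the count again.
+ */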
+
+static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+{
+	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+}
+
+/*
+ * vlan network devices have devices nesting below it, and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;
+
+static void vlan_dev_set_lockdep_one(struct net_device *dev,
+				     struct netdev_queue *txq,
+				     void *unused)
+{
+	lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
+}
+
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
+{
+	lockdep_set_class(&dev->addr_list_lock,
+			  &vlan_netdev_addr_lock_key);
+	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
+}
+
+static const struct header_ops vlan_header_ops = {
+	.create	 = vlan_dev_hard_header,
+	.parse	 = eth_header_parse,
+};
+
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+				     unsigned short type,
+				     const void *daddr, const void *saddr,
+				     unsigned int len)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	if (saddr == NULL)
+		saddr = dev->dev_addr;
+
+	return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+	.create	 = vlan_passthru_hard_header,
+	.parse	 = eth_header_parse,
+};
+
+static struct device_type vlan_type = {
+	.name	= "vlan",
+};
+
+static const struct net_device_ops vlan_netdev_ops;
+
+static int vlan_dev_init(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	netif_carrier_off(dev);
+
+	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
+	dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+					  IFF_MASTER | IFF_SLAVE);
+	dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
+					  (1<<__LINK_STATE_DORMANT))) |
+		      (1<<__LINK_STATE_PRESENT);
+
+	if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING)
+		dev->state |= (1 << __LINK_STATE_NOCARRIER);
+
+	dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
+			   NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
+			   NETIF_F_GSO_ENCAP_ALL |
+			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
+			   NETIF_F_ALL_FCOE;
+
+	dev->features |= dev->hw_features | NETIF_F_LLTX;
+	dev->gso_max_size = real_dev->gso_max_size;
+	dev->gso_max_segs = real_dev->gso_max_segs;
+	if (dev->features & NETIF_F_VLAN_FEATURES)
+		netdev_warn(real_dev, "VLAN features are set incorrectly.  Q-in-Q configurations may not work correctly.\n");
+
+	dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+	dev->hw_enc_features = vlan_tnl_features(real_dev);
+	dev->mpls_features = real_dev->mpls_features;
+
+	/* ipv6 shared card related stuff */
+	dev->dev_id = real_dev->dev_id;
+
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_STOLEN;
+	}
+	if (is_zero_ether_addr(dev->broadcast))
+		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+
+#if IS_ENABLED(CONFIG_FCOE)
+	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
+#endif
+
+	dev->needed_headroom = real_dev->needed_headroom;
+	if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) {
+		dev->header_ops      = &vlan_passthru_header_ops;
+		dev->hard_header_len = real_dev->hard_header_len;
+	} else {
+		dev->header_ops      = &vlan_header_ops;
+		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
+	}
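+	/* Example: a NIC advertising NETIF_F_HW_VLAN_CTAG_TX for an 802.1Q
+	 * vlan takes the passthru branch above (tag carried as skb metadata,
+	 * hard_header_len stays at ETH_HLEN); otherwise the 4-byte tag is
+	 * built in software and the needed header grows by VLAN_HLEN.
+	 */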
+
+	dev->netdev_ops = &vlan_netdev_ops;
+
+	SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
+	vlan_dev_set_lockdep_class(dev);
+
+	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+	if (!vlan->vlan_pcpu_stats)
+		return -ENOMEM;
+
+	/* Get vlan's reference to real_dev */
+	dev_hold(real_dev);
+
+	return 0;
+}
+
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
+{
+	struct vlan_priority_tci_mapping *pm;
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+		while ((pm = vlan->egress_priority_map[i]) != NULL) {
+			vlan->egress_priority_map[i] = pm->next;
+			kfree(pm);
+		}
+	}
+}
+
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	netdev_features_t old_features = features;
+	netdev_features_t lower_features;
+
+	lower_features = netdev_intersect_features((real_dev->vlan_features |
+						    NETIF_F_RXCSUM),
+						   real_dev->features);
+
+	/* Add HW_CSUM setting to preserve user ability to control
+	 * checksum offload on the vlan device.
+	 */
+	if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+		lower_features |= NETIF_F_HW_CSUM;
+	features = netdev_intersect_features(features, lower_features);
+	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
+	features |= NETIF_F_LLTX;
+
+	return features;
+}
+
+static int vlan_ethtool_get_link_ksettings(struct net_device *dev,
+					   struct ethtool_link_ksettings *cmd)
+{
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	return __ethtool_get_link_ksettings(vlan->real_dev, cmd);
+}
+
+static void vlan_ethtool_get_drvinfo(struct net_device *dev,
+				     struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
+	strlcpy(info->version, vlan_version, sizeof(info->version));
+	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+}
+
+static int vlan_ethtool_get_ts_info(struct net_device *dev,
+				    struct ethtool_ts_info *info)
+{
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+	struct phy_device *phydev = vlan->real_dev->phydev;
+
+	if (phy_has_tsinfo(phydev)) {
+		return phy_ts_info(phydev, info);
+	} else if (ops->get_ts_info) {
+		return ops->get_ts_info(vlan->real_dev, info);
+	} else {
+		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;
+	}
+
+	return 0;
+}
+
+static void vlan_dev_get_stats64(struct net_device *dev,
+				 struct rtnl_link_stats64 *stats)
+{
+	struct vlan_pcpu_stats *p;
+	u32 rx_errors = 0, tx_dropped = 0;
+	int i;
+
+	for_each_possible_cpu(i) {
+		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+		unsigned int start;
+
+		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rxpackets	= p->rx_packets;
+			rxbytes		= p->rx_bytes;
+			rxmulticast	= p->rx_multicast;
+			txpackets	= p->tx_packets;
+			txbytes		= p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets	+= rxpackets;
+		stats->rx_bytes		+= rxbytes;
+		stats->multicast	+= rxmulticast;
+		stats->tx_packets	+= txpackets;
+		stats->tx_bytes		+= txbytes;
+		/* rx_errors & tx_dropped are u32 */
+		rx_errors	+= p->rx_errors;
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->rx_errors  = rx_errors;
+	stats->tx_dropped = tx_dropped;
+}
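+/* The u64_stats_fetch_begin_irq()/retry_irq() pair above is a
+ * seqcount-style retry loop: if a writer updates the 64-bit counters
+ * mid-read (possible on 32-bit SMP), the reader re-snapshots, so the
+ * accumulated totals are always self-consistent.
+ */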
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+	return;
+}
+
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+	struct netpoll *netpoll;
+	int err = 0;
+
+	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!netpoll)
+		goto out;
+
+	err = __netpoll_setup(netpoll, real_dev);
+	if (err) {
+		kfree(netpoll);
+		goto out;
+	}
+
+	vlan->netpoll = netpoll;
+
+out:
+	return err;
+}
+
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct netpoll *netpoll = vlan->netpoll;
+
+	if (!netpoll)
+		return;
+
+	vlan->netpoll = NULL;
+	__netpoll_free(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int vlan_dev_get_iflink(const struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+	return real_dev->ifindex;
+}
+
+static const struct ethtool_ops vlan_ethtool_ops = {
+	.get_link_ksettings	= vlan_ethtool_get_link_ksettings,
+	.get_drvinfo	        = vlan_ethtool_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= vlan_ethtool_get_ts_info,
+};
+
+static const struct net_device_ops vlan_netdev_ops = {
+	.ndo_change_mtu		= vlan_dev_change_mtu,
+	.ndo_init		= vlan_dev_init,
+	.ndo_uninit		= vlan_dev_uninit,
+	.ndo_open		= vlan_dev_open,
+	.ndo_stop		= vlan_dev_stop,
+	.ndo_start_xmit		= vlan_dev_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= vlan_dev_set_mac_address,
+	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
+	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
+	.ndo_do_ioctl		= vlan_dev_ioctl,
+	.ndo_neigh_setup	= vlan_dev_neigh_setup,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
+#if IS_ENABLED(CONFIG_FCOE)
+	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
+	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
+	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
+	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
+	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target,
+#endif
+#ifdef NETDEV_FCOE_WWNN
+	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= vlan_dev_poll_controller,
+	.ndo_netpoll_setup	= vlan_dev_netpoll_setup,
+	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
+#endif
+	.ndo_fix_features	= vlan_dev_fix_features,
+	.ndo_get_iflink		= vlan_dev_get_iflink,
+};
+
+static void vlan_dev_free(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	free_percpu(vlan->vlan_pcpu_stats);
+	vlan->vlan_pcpu_stats = NULL;
+
+	/* Get rid of the vlan's reference to real_dev */
+	dev_put(vlan->real_dev);
+}
+
+void vlan_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->priv_flags		|= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
+	dev->priv_flags		|= IFF_UNICAST_FLT;
+	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
+	netif_keep_dst(dev);
+
+	dev->netdev_ops		= &vlan_netdev_ops;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= vlan_dev_free;
+	dev->ethtool_ops	= &vlan_ethtool_ops;
+
+	dev->min_mtu		= 0;
+	dev->max_mtu		= ETH_MAX_MTU;
+
+	eth_zero_addr(dev->broadcast);
+}
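+/*
+ * Typical creation path (sketch): "ip link add link eth0 name eth0.100
+ * type vlan id 100" reaches vlan_setup() through the vlan rtnl link ops,
+ * and vlan_dev_init() then runs when the new device is registered.
+ */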
diff --git a/upstream/linux-5.10/net/bridge/br_fdb.c b/upstream/linux-5.10/net/bridge/br_fdb.c
new file mode 100755
index 0000000..65a6054
--- /dev/null
+++ b/upstream/linux-5.10/net/bridge/br_fdb.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *	Forwarding database
+ *	Linux ethernet bridge
+ *
+ *	Authors:
+ *	Lennert Buytenhek		<buytenh@gnu.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/unaligned.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <trace/events/bridge.h>
+#include "br_private.h"
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/fast_common.h>
+#include <net/SI/net_track.h>
+#include <net/SI/netioc_proc.h>
+#endif
+
+static const struct rhashtable_params br_fdb_rht_params = {
+	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
+	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
+	.key_len = sizeof(struct net_bridge_fdb_key),
+	.automatic_shrinking = true,
+};
+
+static struct kmem_cache *br_fdb_cache __read_mostly;
+static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+		      const unsigned char *addr, u16 vid);
+static void fdb_notify(struct net_bridge *br,
+		       const struct net_bridge_fdb_entry *, int, bool);
+
+int __init br_fdb_init(void)
+{
+	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
+					 sizeof(struct net_bridge_fdb_entry),
+					 0,
+					 SLAB_HWCACHE_ALIGN, NULL);
+	if (!br_fdb_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void br_fdb_fini(void)
+{
+	kmem_cache_destroy(br_fdb_cache);
+}
+
+int br_fdb_hash_init(struct net_bridge *br)
+{
+	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
+}
+
+void br_fdb_hash_fini(struct net_bridge *br)
+{
+	rhashtable_destroy(&br->fdb_hash_tbl);
+}
+
+/* if topology_changing then use forward_delay (default 15 sec)
+ * otherwise keep longer (default 5 minutes)
+ */
+static inline unsigned long hold_time(const struct net_bridge *br)
+{
+	return br->topology_change ? br->forward_delay : br->ageing_time;
+}
+
+static inline int has_expired(const struct net_bridge *br,
+				  const struct net_bridge_fdb_entry *fdb)
+{
+	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
+	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
+	       time_before_eq(fdb->updated + hold_time(br), jiffies);
+}
+
+static void fdb_rcu_free(struct rcu_head *head)
+{
+	struct net_bridge_fdb_entry *ent
+		= container_of(head, struct net_bridge_fdb_entry, rcu);
+	kmem_cache_free(br_fdb_cache, ent);
+}
+
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
+						 const unsigned char *addr,
+						 __u16 vid)
+{
+	struct net_bridge_fdb_key key;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	key.vlan_id = vid;
+	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
+
+	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
+}
+
+/* requires bridge hash_lock */
+static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
+						const unsigned char *addr,
+						__u16 vid)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	lockdep_assert_held_once(&br->hash_lock);
+
+	rcu_read_lock();
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+	rcu_read_unlock();
+
+	return fdb;
+}
+
+struct net_device *br_fdb_find_port(const struct net_device *br_dev,
+				    const unsigned char *addr,
+				    __u16 vid)
+{
+	struct net_bridge_fdb_entry *f;
+	struct net_device *dev = NULL;
+	struct net_bridge *br;
+
+	ASSERT_RTNL();
+
+	if (!netif_is_bridge_master(br_dev))
+		return NULL;
+
+	br = netdev_priv(br_dev);
+	rcu_read_lock();
+	f = br_fdb_find_rcu(br, addr, vid);
+	if (f && f->dst)
+		dev = f->dst->dev;
+	rcu_read_unlock();
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(br_fdb_find_port);
+
+struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
+					     const unsigned char *addr,
+					     __u16 vid)
+{
+	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+}
+
+/* When a static FDB entry is added, the mac address from the entry is
+ * added to the bridge private HW address list and all required ports
+ * are then updated with the new information.
+ * Called under RTNL.
+ */
+static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
+{
+	int err;
+	struct net_bridge_port *p;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!br_promisc_port(p)) {
+			err = dev_uc_add(p->dev, addr);
+			if (err)
+				goto undo;
+		}
+	}
+
+	return;
+undo:
+	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+		if (!br_promisc_port(p))
+			dev_uc_del(p->dev, addr);
+	}
+}
+
+/* When a static FDB entry is deleted, the HW address from that entry is
+ * also removed from the bridge private HW address list and updates all
+ * the ports with needed information.
+ * Called under RTNL.
+ */
+static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
+{
+	struct net_bridge_port *p;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!br_promisc_port(p))
+			dev_uc_del(p->dev, addr);
+	}
+}
+
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
+		       bool swdev_notify)
+{
+	trace_fdb_delete(br, f);
+
+	if (test_bit(BR_FDB_STATIC, &f->flags))
+		fdb_del_hw_addr(br, f->key.addr.addr);
+
+	hlist_del_init_rcu(&f->fdb_node);
+	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
+			       br_fdb_rht_params);
+	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
+	call_rcu(&f->rcu, fdb_rcu_free);
+}
+
+/* Delete a local entry if no other port had the same address. */
+static void fdb_delete_local(struct net_bridge *br,
+			     const struct net_bridge_port *p,
+			     struct net_bridge_fdb_entry *f)
+{
+	const unsigned char *addr = f->key.addr.addr;
+	struct net_bridge_vlan_group *vg;
+	const struct net_bridge_vlan *v;
+	struct net_bridge_port *op;
+	u16 vid = f->key.vlan_id;
+
+	/* Maybe another port has same hw addr? */
+	list_for_each_entry(op, &br->port_list, list) {
+		vg = nbp_vlan_group(op);
+		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+		    (!vid || br_vlan_find(vg, vid))) {
+			f->dst = op;
+			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+			return;
+		}
+	}
+
+	vg = br_vlan_group(br);
+	v = br_vlan_find(vg, vid);
+	/* Maybe bridge device has same hw addr? */
+	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+	    (!vid || (v && br_vlan_should_use(v)))) {
+		f->dst = NULL;
+		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+		return;
+	}
+
+	fdb_delete(br, f, true);
+}
+
+void br_fdb_find_delete_local(struct net_bridge *br,
+			      const struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid)
+{
+	struct net_bridge_fdb_entry *f;
+
+	spin_lock_bh(&br->hash_lock);
+	f = br_fdb_find(br, addr, vid);
+	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
+		fdb_delete_local(br, p, f);
+	spin_unlock_bh(&br->hash_lock);
+}
+
+void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_fdb_entry *f;
+	struct net_bridge *br = p->br;
+	struct net_bridge_vlan *v;
+
+	spin_lock_bh(&br->hash_lock);
+	vg = nbp_vlan_group(p);
+	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
+		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
+		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
+			/* delete old one */
+			fdb_delete_local(br, p, f);
+
+			/* if this port has no vlan information
+			 * configured, we can safely be done at
+			 * this point.
+			 */
+			if (!vg || !vg->num_vlans)
+				goto insert;
+		}
+	}
+
+insert:
+	/* insert new address,  may fail if invalid address or dup. */
+	fdb_insert(br, p, newaddr, 0);
+
+	if (!vg || !vg->num_vlans)
+		goto done;
+
+	/* Now add entries for every VLAN configured on the port.
+	 * This function runs under RTNL so the bitmap will not change
+	 * from under us.
+	 */
+	list_for_each_entry(v, &vg->vlan_list, vlist)
+		fdb_insert(br, p, newaddr, v->vid);
+
+done:
+	spin_unlock_bh(&br->hash_lock);
+}
+
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_fdb_entry *f;
+	struct net_bridge_vlan *v;
+
+	spin_lock_bh(&br->hash_lock);
+
+	/* If old entry was unassociated with any port, then delete it. */
+	f = br_fdb_find(br, br->dev->dev_addr, 0);
+	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
+		fdb_delete_local(br, NULL, f);
+
+	fdb_insert(br, NULL, newaddr, 0);
+	vg = br_vlan_group(br);
+	if (!vg || !vg->num_vlans)
+		goto out;
+	/* Now remove and add entries for every VLAN configured on the
+	 * bridge.  This function runs under RTNL so the bitmap will not
+	 * change from under us.
+	 */
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
+		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
+		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
+			fdb_delete_local(br, NULL, f);
+		fdb_insert(br, NULL, newaddr, v->vid);
+	}
+out:
+	spin_unlock_bh(&br->hash_lock);
+}
+
+void br_fdb_cleanup(struct work_struct *work)
+{
+	struct net_bridge *br = container_of(work, struct net_bridge,
+					     gc_work.work);
+	struct net_bridge_fdb_entry *f = NULL;
+	unsigned long delay = hold_time(br);
+	unsigned long work_delay = delay;
+	unsigned long now = jiffies;
+
+	/* this part is tricky, in order to avoid blocking learning and
+	 * consequently forwarding, we rely on rcu to delete objects with
+	 * delayed freeing allowing us to continue traversing
+	 */
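+	/* e.g. a concurrent fdb_delete() only unlinks an entry and frees it
+	 * through call_rcu(), so this lockless walk never touches freed
+	 * memory.
+	 */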
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		unsigned long this_timer = f->updated + delay;
+
+		if (test_bit(BR_FDB_STATIC, &f->flags) ||
+		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
+			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
+				if (time_after(this_timer, now))
+					work_delay = min(work_delay,
+							 this_timer - now);
+				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
+							   &f->flags))
+					fdb_notify(br, f, RTM_NEWNEIGH, false);
+			}
+			continue;
+		}
+
+		if (time_after(this_timer, now)) {
+			work_delay = min(work_delay, this_timer - now);
+		} else {
+			spin_lock_bh(&br->hash_lock);
+			if (!hlist_unhashed(&f->fdb_node))
+				fdb_delete(br, f, true);
+			spin_unlock_bh(&br->hash_lock);
+		}
+	}
+	rcu_read_unlock();
+
+	/* Cleanup minimum 10 milliseconds apart */
+	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
+	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
+}
+
+/* Completely flush all dynamic entries in forwarding database.*/
+void br_fdb_flush(struct net_bridge *br)
+{
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
+
+	spin_lock_bh(&br->hash_lock);
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			fdb_delete(br, f, true);
+	}
+	spin_unlock_bh(&br->hash_lock);
+}
+
+/* Flush all entries referring to a specific port.
+ * if do_all is set also flush static entries
+ * if vid is set delete all entries that match the vlan_id
+ */
+void br_fdb_delete_by_port(struct net_bridge *br,
+			   const struct net_bridge_port *p,
+			   u16 vid,
+			   int do_all)
+{
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
+
+	spin_lock_bh(&br->hash_lock);
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (f->dst != p)
+			continue;
+
+		if (!do_all)
+			if (test_bit(BR_FDB_STATIC, &f->flags) ||
+			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
+			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
+			    (vid && f->key.vlan_id != vid))
+				continue;
+
+		if (test_bit(BR_FDB_LOCAL, &f->flags))
+			fdb_delete_local(br, p, f);
+		else
+			fdb_delete(br, f, true);
+	}
+	spin_unlock_bh(&br->hash_lock);
+}
+
+#if IS_ENABLED(CONFIG_ATM_LANE)
+/* Interface used by ATM LANE hook to test
+ * if an addr is on some other bridge port */
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
+{
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *port;
+	int ret;
+
+	rcu_read_lock();
+	port = br_port_get_rcu(dev);
+	if (!port)
+		ret = 0;
+	else {
+		fdb = br_fdb_find_rcu(port->br, addr, 0);
+		ret = fdb && fdb->dst && fdb->dst->dev != dev &&
+			fdb->dst->state == BR_STATE_FORWARDING;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+#endif /* CONFIG_ATM_LANE */
+
+/*
+ * Fill buffer with forwarding table records in
+ * the API format.
+ */
+int br_fdb_fillbuf(struct net_bridge *br, void *buf,
+		   unsigned long maxnum, unsigned long skip)
+{
+	struct net_bridge_fdb_entry *f;
+	struct __fdb_entry *fe = buf;
+	int num = 0;
+
+	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (num >= maxnum)
+			break;
+
+		if (has_expired(br, f))
+			continue;
+
+		/* ignore pseudo entry for local MAC address */
+		if (!f->dst)
+			continue;
+
+		if (skip) {
+			--skip;
+			continue;
+		}
+
+		/* convert from internal format to API */
+		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
+
+		/* due to ABI compat need to split into hi/lo */
+		fe->port_no = f->dst->port_no;
+		fe->port_hi = f->dst->port_no >> 8;
+
+		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
+		++fe;
+		++num;
+	}
+	rcu_read_unlock();
+
+	return num;
+}
+
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
+					       struct net_bridge_port *source,
+					       const unsigned char *addr,
+					       __u16 vid,
+					       unsigned long flags)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
+	if (fdb) {
+		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
+		fdb->dst = source;
+		fdb->key.vlan_id = vid;
+		fdb->flags = flags;
+		fdb->updated = fdb->used = jiffies;
+		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
+						  &fdb->rhnode,
+						  br_fdb_rht_params)) {
+			kmem_cache_free(br_fdb_cache, fdb);
+			fdb = NULL;
+		} else {
+			hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
+		}
+	}
+	return fdb;
+}
+
+static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+		  const unsigned char *addr, u16 vid)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	if (!is_valid_ether_addr(addr))
+		return -EINVAL;
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb) {
+		/* it is okay to have multiple ports with same
+		 * address, just use the first one.
+		 */
+		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+			return 0;
+		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
+		       source ? source->dev->name : br->dev->name, addr, vid);
+		fdb_delete(br, fdb, true);
+	}
+
+	fdb = fdb_create(br, source, addr, vid,
+			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
+	if (!fdb)
+		return -ENOMEM;
+
+	fdb_add_hw_addr(br, addr);
+	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+	return 0;
+}
+
+/* Add entry for local address of interface */
+int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+		  const unsigned char *addr, u16 vid)
+{
+	int ret;
+
+	spin_lock_bh(&br->hash_lock);
+	ret = fdb_insert(br, source, addr, vid);
+	spin_unlock_bh(&br->hash_lock);
+	return ret;
+}
+
+/* returns true if the fdb was modified */
+static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
+{
+	return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
+		  test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
+}
+
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+		   const unsigned char *addr, u16 vid, unsigned long flags)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	/* some users want to always flood. */
+	if (hold_time(br) == 0)
+		return;
+
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+	if (likely(fdb)) {
+		/* attempt to update an entry for a local interface */
+		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
+			if (net_ratelimit())
+				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
+					source->dev->name, addr, vid);
+		} else {
+			unsigned long now = jiffies;
+			bool fdb_modified = false;
+
+			if (now != fdb->updated) {
+				fdb->updated = now;
+				fdb_modified = __fdb_mark_active(fdb);
+			}
+
+			/* fastpath: update of existing entry */
+			if (unlikely(source != fdb->dst &&
+				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
+				fdb->dst = source;
+				fdb_modified = true;
+				/* Take over HW learned entry */
+				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+						      &fdb->flags)))
+					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+						  &fdb->flags);
+			}
+
+			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
+				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+			if (unlikely(fdb_modified)) {
+				trace_br_fdb_update(br, source, addr, vid, flags);
+				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+			}
+		}
+	} else {
+		spin_lock(&br->hash_lock);
+		fdb = fdb_create(br, source, addr, vid, flags);
+		if (fdb) {
+			trace_br_fdb_update(br, source, addr, vid, flags);
+			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+		}
+		/* else  we lose race and someone else inserts
+		 * it first, don't bother updating
+		 */
+		spin_unlock(&br->hash_lock);
+	}
+}
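+/* Learning example: the first frame from 00:11:22:33:44:55 on port eth1
+ * takes the locked slow path above and creates a dynamic entry; later
+ * frames from the same source only refresh fdb->updated through the
+ * lock-free RCU lookup.
+ */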
+
+static int fdb_to_nud(const struct net_bridge *br,
+		      const struct net_bridge_fdb_entry *fdb)
+{
+	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+		return NUD_PERMANENT;
+	else if (test_bit(BR_FDB_STATIC, &fdb->flags))
+		return NUD_NOARP;
+	else if (has_expired(br, fdb))
+		return NUD_STALE;
+	else
+		return NUD_REACHABLE;
+}
+
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
+			 const struct net_bridge_fdb_entry *fdb,
+			 u32 portid, u32 seq, int type, unsigned int flags)
+{
+	unsigned long now = jiffies;
+	struct nda_cacheinfo ci;
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family	 = AF_BRIDGE;
+	ndm->ndm_pad1    = 0;
+	ndm->ndm_pad2    = 0;
+	ndm->ndm_flags	 = 0;
+	ndm->ndm_type	 = 0;
+	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
+	ndm->ndm_state   = fdb_to_nud(br, fdb);
+
+	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+		ndm->ndm_flags |= NTF_OFFLOADED;
+	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+		ndm->ndm_flags |= NTF_EXT_LEARNED;
+	if (test_bit(BR_FDB_STICKY, &fdb->flags))
+		ndm->ndm_flags |= NTF_STICKY;
+
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
+		goto nla_put_failure;
+	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
+	ci.ndm_confirmed = 0;
+	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
+	ci.ndm_refcnt	 = 0;
+	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
+
+	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
+					&fdb->key.vlan_id))
+		goto nla_put_failure;
+
+	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
+		u8 notify_bits = FDB_NOTIFY_BIT;
+
+		if (!nest)
+			goto nla_put_failure;
+		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
+
+		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
+			nla_nest_cancel(skb, nest);
+			goto nla_put_failure;
+		}
+
+		nla_nest_end(skb, nest);
+	}
+
+	nlmsg_end(skb, nlh);
+	return 0;
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t fdb_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg))
+		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
+		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
+		+ nla_total_size(sizeof(struct nda_cacheinfo))
+		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
+		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
+}
+
+static void fdb_notify(struct net_bridge *br,
+		       const struct net_bridge_fdb_entry *fdb, int type,
+		       bool swdev_notify)
+{
+	struct net *net = dev_net(br->dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	if (swdev_notify)
+		br_switchdev_fdb_notify(fdb, type);
+
+	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+	return;
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Dump information about entries, in response to GETNEIGH */
+int br_fdb_dump(struct sk_buff *skb,
+		struct netlink_callback *cb,
+		struct net_device *dev,
+		struct net_device *filter_dev,
+		int *idx)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_fdb_entry *f;
+	int err = 0;
+
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		return err;
+
+	if (!filter_dev) {
+		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+		if (err < 0)
+			return err;
+	}
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (*idx < cb->args[2])
+			goto skip;
+		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
+			if (filter_dev != dev)
+				goto skip;
+			/* !f->dst is a special case for bridge
+			 * It means the MAC belongs to the bridge
+			 * Therefore need a little more filtering
+			 * we only want to dump the !f->dst case
+			 */
+			if (f->dst)
+				goto skip;
+		}
+		if (!filter_dev && f->dst)
+			goto skip;
+
+		err = fdb_fill_info(skb, br, f,
+				    NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq,
+				    RTM_NEWNEIGH,
+				    NLM_F_MULTI);
+		if (err < 0)
+			break;
+skip:
+		*idx += 1;
+	}
+	rcu_read_unlock();
+
+	return err;
+}
+
+int br_fdb_get(struct sk_buff *skb,
+	       struct nlattr *tb[],
+	       struct net_device *dev,
+	       const unsigned char *addr,
+	       u16 vid, u32 portid, u32 seq,
+	       struct netlink_ext_ack *extack)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_fdb_entry *f;
+	int err = 0;
+
+	rcu_read_lock();
+	f = br_fdb_find_rcu(br, addr, vid);
+	if (!f) {
+		NL_SET_ERR_MSG(extack, "Fdb entry not found");
+		err = -ENOENT;
+		goto errout;
+	}
+
+	err = fdb_fill_info(skb, br, f, portid, seq,
+			    RTM_NEWNEIGH, 0);
+errout:
+	rcu_read_unlock();
+	return err;
+}
+
+/* returns true if the fdb is modified */
+static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
+{
+	bool modified = false;
+
+	/* allow to mark an entry as inactive, usually done on creation */
+	if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
+	    !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+		modified = true;
+
+	if ((notify & FDB_NOTIFY_BIT) &&
+	    !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+		/* enabled activity tracking */
+		modified = true;
+	} else if (!(notify & FDB_NOTIFY_BIT) &&
+		   test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+		/* disabled activity tracking, clear notify state */
+		clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
+		modified = true;
+	}
+
+	return modified;
+}
+
+/* Update (create or replace) forwarding database entry */
+static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
+			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
+			 struct nlattr *nfea_tb[])
+{
+	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
+	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
+	struct net_bridge_fdb_entry *fdb;
+	u16 state = ndm->ndm_state;
+	bool modified = false;
+	u8 notify = 0;
+
+	/* If the port cannot learn allow only local and static entries */
+	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+	    !(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return -EPERM;
+
+	if (!source && !(state & NUD_PERMANENT)) {
+		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
+			br->dev->name);
+		return -EINVAL;
+	}
+
+	if (is_sticky && (state & NUD_PERMANENT))
+		return -EINVAL;
+
+	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
+		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
+		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
+		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
+			return -EINVAL;
+	}
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb == NULL) {
+		if (!(flags & NLM_F_CREATE))
+			return -ENOENT;
+
+		fdb = fdb_create(br, source, addr, vid, 0);
+		if (!fdb)
+			return -ENOMEM;
+
+		modified = true;
+	} else {
+		if (flags & NLM_F_EXCL)
+			return -EEXIST;
+
+		if (fdb->dst != source) {
+			fdb->dst = source;
+			modified = true;
+		}
+	}
+
+	if (fdb_to_nud(br, fdb) != state) {
+		if (state & NUD_PERMANENT) {
+			set_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
+				fdb_add_hw_addr(br, addr);
+		} else if (state & NUD_NOARP) {
+			clear_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
+				fdb_add_hw_addr(br, addr);
+		} else {
+			clear_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
+				fdb_del_hw_addr(br, addr);
+		}
+
+		modified = true;
+	}
+
+	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
+		change_bit(BR_FDB_STICKY, &fdb->flags);
+		modified = true;
+	}
+
+	if (fdb_handle_notify(fdb, notify))
+		modified = true;
+
+	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+
+	fdb->used = jiffies;
+	if (modified) {
+		if (refresh)
+			fdb->updated = jiffies;
+		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+	}
+
+	return 0;
+}
+
+static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
+			struct net_bridge_port *p, const unsigned char *addr,
+			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+			struct netlink_ext_ack *extack)
+{
+	int err = 0;
+
+	if (ndm->ndm_flags & NTF_USE) {
+		if (!p) {
+			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
+				br->dev->name);
+			return -EINVAL;
+		}
+		if (!nbp_state_should_learn(p))
+			return 0;
+
+		local_bh_disable();
+		rcu_read_lock();
+		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
+		rcu_read_unlock();
+		local_bh_enable();
+	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
+		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "FDB entry towards bridge must be permanent");
+			return -EINVAL;
+		}
+		err = br_fdb_external_learn_add(br, p, addr, vid, true);
+	} else {
+		spin_lock_bh(&br->hash_lock);
+		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
+		spin_unlock_bh(&br->hash_lock);
+	}
+
+	return err;
+}
+
+static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
+	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
+	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
+};
+
+/* Add new permanent fdb entry with RTM_NEWNEIGH */
+int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+	       struct net_device *dev,
+	       const unsigned char *addr, u16 vid, u16 nlh_flags,
+	       struct netlink_ext_ack *extack)
+{
+	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br = NULL;
+	int err = 0;
+
+	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
+
+	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+		return -EINVAL;
+	}
+
+	if (is_zero_ether_addr(addr)) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
+		return -EINVAL;
+	}
+
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		br = p->br;
+		vg = nbp_vlan_group(p);
+	}
+
+	if (tb[NDA_FDB_EXT_ATTRS]) {
+		attr = tb[NDA_FDB_EXT_ATTRS];
+		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
+				       br_nda_fdb_pol, extack);
+		if (err)
+			return err;
+	} else {
+		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
+	}
+
+	if (vid) {
+		v = br_vlan_find(vg, vid);
+		if (!v || !br_vlan_should_use(v)) {
+			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
+			return -EINVAL;
+		}
+
+		/* VID was specified, so use it. */
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+				   extack);
+	} else {
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+				   extack);
+		if (err || !vg || !vg->num_vlans)
+			goto out;
+
+		/* We have vlans configured on this port and user didn't
+		 * specify a VLAN.  To be nice, add/update entry for every
+		 * vlan on this port.
+		 */
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
+					   nfea_tb, extack);
+			if (err)
+				goto out;
+		}
+	}
+
+out:
+	return err;
+}
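+/*
+ * Illustrative trigger (assuming iproute2 semantics): "bridge fdb add
+ * 00:11:22:33:44:55 dev eth1 master static" arrives here as RTM_NEWNEIGH
+ * with ndm_state NUD_NOARP; without a "vlan" argument it falls into the
+ * per-vlan loop above.
+ */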
+
+static int fdb_delete_by_addr_and_port(struct net_bridge *br,
+				       const struct net_bridge_port *p,
+				       const u8 *addr, u16 vlan)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = br_fdb_find(br, addr, vlan);
+	if (!fdb || fdb->dst != p)
+		return -ENOENT;
+
+	fdb_delete(br, fdb, true);
+
+	return 0;
+}
+
+static int __br_fdb_delete(struct net_bridge *br,
+			   const struct net_bridge_port *p,
+			   const unsigned char *addr, u16 vid)
+{
+	int err;
+
+	spin_lock_bh(&br->hash_lock);
+	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
+/* Remove neighbor entry with RTM_DELNEIGH */
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+		  struct net_device *dev,
+		  const unsigned char *addr, u16 vid)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br;
+	int err;
+
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		vg = nbp_vlan_group(p);
+		br = p->br;
+	}
+
+	if (vid) {
+		v = br_vlan_find(vg, vid);
+		if (!v) {
+			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
+			return -EINVAL;
+		}
+
+		err = __br_fdb_delete(br, p, addr, vid);
+	} else {
+		err = -ENOENT;
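+		/* err starts as -ENOENT and is &-ed with each result below:
+		 * since 0 & x == 0, the call reports success if the delete
+		 * succeeded for at least one vlan.
+		 */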
+		err &= __br_fdb_delete(br, p, addr, 0);
+		if (!vg || !vg->num_vlans)
+			return err;
+
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			err &= __br_fdb_delete(br, p, addr, v->vid);
+		}
+	}
+
+	return err;
+}
+
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+	struct net_bridge_fdb_entry *f, *tmp;
+	int err = 0;
+
+	ASSERT_RTNL();
+
+	/* the key here is that static entries change only under rtnl */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			continue;
+		err = dev_uc_add(p->dev, f->key.addr.addr);
+		if (err)
+			goto rollback;
+	}
+done:
+	rcu_read_unlock();
+
+	return err;
+
+rollback:
+	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
+			continue;
+		if (tmp == f)
+			break;
+		dev_uc_del(p->dev, tmp->key.addr.addr);
+	}
+
+	goto done;
+}
+
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+	struct net_bridge_fdb_entry *f;
+
+	ASSERT_RTNL();
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			continue;
+
+		dev_uc_del(p->dev, f->key.addr.addr);
+	}
+	rcu_read_unlock();
+}
+
+int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid,
+			      bool swdev_notify)
+{
+	struct net_bridge_fdb_entry *fdb;
+	bool modified = false;
+	int err = 0;
+
+	trace_br_fdb_external_learn_add(br, p, addr, vid);
+
+	spin_lock_bh(&br->hash_lock);
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (!fdb) {
+		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
+
+		if (swdev_notify)
+			flags |= BIT(BR_FDB_ADDED_BY_USER);
+
+		if (!p)
+			flags |= BIT(BR_FDB_LOCAL);
+
+		fdb = fdb_create(br, p, addr, vid, flags);
+		if (!fdb) {
+			err = -ENOMEM;
+			goto err_unlock;
+		}
+		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+	} else {
+		fdb->updated = jiffies;
+
+		if (fdb->dst != p) {
+			fdb->dst = p;
+			modified = true;
+		}
+
+		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+			/* Refresh entry */
+			fdb->used = jiffies;
+		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
+			/* Take over SW learned entry */
+			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
+			modified = true;
+		}
+
+		if (swdev_notify)
+			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+
+		if (!p)
+			set_bit(BR_FDB_LOCAL, &fdb->flags);
+
+		if (modified)
+			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+	}
+
+err_unlock:
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
+int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid,
+			      bool swdev_notify)
+{
+	struct net_bridge_fdb_entry *fdb;
+	int err = 0;
+
+	spin_lock_bh(&br->hash_lock);
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+		fdb_delete(br, fdb, swdev_notify);
+	else
+		err = -ENOENT;
+
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
+void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
+			  const unsigned char *addr, u16 vid, bool offloaded)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	spin_lock_bh(&br->hash_lock);
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+		change_bit(BR_FDB_OFFLOADED, &fdb->flags);
+
+	spin_unlock_bh(&br->hash_lock);
+}
+#ifdef CONFIG_FASTNAT_MODULE
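+/* Fast bridge path: look up the destination MAC (with the VLAN obtained via
+ * br_should_learn()) directly in the bridge FDB. When the entry points at a
+ * forwarding port other than the ingress one (or hairpin mode is set), the
+ * skb is transmitted immediately, bypassing the regular br_handle_frame()
+ * path. Returns 1 when the skb was consumed here (forwarded, or dropped as
+ * a loop), 0 to let the normal stack handle it.
+ */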
+int fast_br(struct sk_buff *skb)
+{
+	//lium_fastnat_del
+	const unsigned char *dest = NULL;
+	struct hlist_head *head;
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *p;
+	struct net_bridge *br;
+	u16 vid = 0;
+	int ntl_port_id = 0xff;
+	
+	if(!skb->dev)
+	{
+		//print_sun(SUN_DBG, "fast_br  skb->dev err skb->dev = %x\n", skb->dev);
+		return 0;
+	}
+					  
+	/*if(skb->mac_header == 0 || skb->mac_header == ~0U)
+		panic("driver  not  set  macheader !!!\n");*/
+					  
+	dest = eth_hdr(skb)->h_dest;
+				  
+
+	p = br_port_get_rtnl(skb->dev);
+	if (p == NULL || p->br == NULL)		  
+	{
+		//print_sun(SUN_DBG, "fast_br  br_port_get_rtnl err p = %x\n", p);
+		return 0;
+	}
+				  
+	br = p->br;
+	br_should_learn(p, skb, &vid);
+
+	//head = &br->hash[br_mac_hash(dest, vid)];
+					  
+	//if((fdb = fdb_find_rcu(head, dest,vid)) != NULL)
+	if((fdb = fdb_find_rcu(&(br->fdb_hash_tbl), dest,vid)) != NULL)
+	{
+		
+		if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev && 
+			(((fdb->dst->flags & BR_HAIRPIN_MODE) || skb->dev != fdb->dst->dev) &&fdb->dst->state == BR_STATE_FORWARDING)) 
+		{
+			fast_tcpdump(skb);
+			if(fastnat_level == FAST_NET_DEVICE){
+				skb->dev->stats.rx_packets++;
+				skb->dev->stats.rx_bytes += skb->len;
+			}
+			skb->dev = fdb->dst->dev;
+			skb->isFastbr = 1;
+			fdb->updated = jiffies;
+			skb->now_location |= FASTBR_SUCC;
+			skb_rest_data_byproto(skb);
+
+			br_dev_queue_push_xmit(NULL, NULL, skb);
+			return 1;
+		}
+			  
+		
+		if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev && 
+			(skb->dev == fdb->dst->dev) && fdb->dst->state == BR_STATE_FORWARDING)
+		{
+			skbinfo_add(NULL,SKB_LOOP);
+			skb->dev->stats.rx_dropped++;
+			//print_sun(SUN_ERR,"fast_br loop data discarded, dev:%s \n", skb->dev->name);
+			kfree_skb(skb);
+			return 1;
+		}
+	}
+	//print_sun(SUN_DBG, "fast_br  fdb_find_rcu err fdb = %x \n",fdb);
+	
+	return 0;
+}
+EXPORT_SYMBOL(fast_br);
+
+
+
+struct net_device *getbrport_bydst(struct net_device *dev,unsigned char *dest)
+{
+	//lium_fastnat_del
+	//struct hlist_head *head;
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *p;
+	struct net_bridge *br;
+	struct net_bridge_vlan_group *vg;
+	__u16 vid;
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() begine");
+#endif
+
+	if (dev == NULL || !(dev->priv_flags & IFF_EBRIDGE))
+		return dev;
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() 1");
+#endif
+	br = netdev_priv(dev);
+	vg = br_vlan_group_rcu(br);
+	vid = br_get_pvid(vg);
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() 2");
+#endif
+	
+	//head = &br->hash[br_mac_hash(dest,vid)];
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() 3");
+#endif
+	//if((fdb = fdb_find_rcu(head, dest,vid)) != NULL)
+	if((fdb = fdb_find_rcu(&(br->fdb_hash_tbl), dest,vid)) != NULL)
+	{
+	
+		if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev && 
+			(fdb->dst->state == BR_STATE_FORWARDING)) //(fdb->dst->flags & BR_HAIRPIN_MODE)
+		{
+			return fdb->dst->dev;
+		}
+	}
+	return dev;
+}
+
+extern void fast_tcpdump(struct sk_buff *skb);
+extern struct neigh_table arp_tbl;
+extern char default_route_name[IFNAMSIZ];
+char default_br_name[IFNAMSIZ] = {0};
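+/* Handle IPv4 address-conflict traffic between the LAN bridge and the WAN:
+ * frames from a bridge port addressed to the bridge IP, its broadcast
+ * address or the WAN IP are re-MACed and sent out the default-route device;
+ * the reverse direction is resolved back to the owning bridge port through
+ * the neighbour table. DHCP is deliberately left to the normal path.
+ * Returns 1 when the skb was transmitted here, 0 otherwise.
+ */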
+int fast_fwd_ip4addr_conflict(struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	__be32 saddr,daddr,wan_ip,br_ip=0,br_bcast=0;
+	struct net_device* in_dev = NULL;
+	struct net_device* out_dev = NULL;
+	struct ethhdr *eth;
+	struct net_bridge_port *p;
+	struct net_bridge *br = NULL;
+	struct net_device *default_route_dev;
+	struct net_device *default_br_dev;
+	struct in_device *ip_ptr;
+
+	if(iph->version != 4 || skb->indev == NULL)
+	{
+		return 0;
+	}
+	default_route_dev = dev_get_by_name(&init_net, default_route_name);
+	if(default_route_dev == NULL)
+	{
+		return 0;
+	}
+	ip_ptr = __in_dev_get_rtnl(default_route_dev);
+	if(ip_ptr && ip_ptr->ifa_list)
+	{
+		wan_ip = ip_ptr->ifa_list->ifa_local;
+	}
+	else
+	{
+		default_br_name[0] = 0;
+		dev_put(default_route_dev);
+		return 0;
+	}
+	in_dev = skb->indev;
+	saddr = iph->saddr;
+	daddr = iph->daddr;
+	p = br_port_get_rtnl(in_dev);
+	if (p != NULL) 		
+	{
+		br = p->br;
+		if (br && br->dev && strncmp(br->dev->name, default_br_name, IFNAMSIZ-1))
+		{
+			strncpy(default_br_name, br->dev->name, IFNAMSIZ-1);
+		}
+	}
+	default_br_dev = dev_get_by_name(&init_net, default_br_name);
+	if(default_br_dev)
+	{
+		ip_ptr = __in_dev_get_rtnl(default_br_dev);
+		if(ip_ptr && ip_ptr->ifa_list)
+		{
+			br_ip = ip_ptr->ifa_list->ifa_local;
+			br_bcast = ip_ptr->ifa_list->ifa_broadcast;
+		}
+	}
+	else
+	{
+		dev_put(default_route_dev);
+		return 0;
+	}
+	if(br && ((daddr == br_ip) || (daddr == br_bcast) || (daddr == wan_ip)))
+	{
+		//printk("@!@1saddr=%08x,daddr=%08x,br_ip=%08x,br_bcast=%08x,wan_ip=%08x\n",saddr, daddr,  br_ip, br_bcast, wan_ip);
+		if (IPPROTO_UDP == iph->protocol)
+		{
+			struct udphdr *udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+			if(udph->source == htons(67) || udph->source == htons(68)
+				|| udph->dest == htons(67) || udph->dest == htons(68)) /* DHCP server/client ports */
+			{
+				//printk("@!@dhcp packet\n");
+				dev_put(default_route_dev);
+				dev_put(default_br_dev);
+				return 0;
+			}
+		}
+		out_dev = default_route_dev;
+		skb_push(skb, ETH_HLEN);
+		eth = (struct ethhdr*)(skb->data);
+		memcpy(eth->h_source, in_dev->dev_addr, ETH_ALEN);
+		memcpy(eth->h_dest, out_dev->dev_addr, ETH_ALEN);
+		fast_tcpdump(skb);
+		skb->dev = out_dev;
+	}
+	else if(in_dev == default_route_dev && ((saddr == br_ip) || (saddr == br_bcast) || (saddr == wan_ip)))
+	{
+		struct neighbour *neigh  = neigh_lookup(&arp_tbl, &daddr, default_br_dev);
+		//printk("@!@2saddr=%08x,daddr=%08x,neigh=%08x,wan_ip=%08x\n",saddr, daddr, neigh, wan_ip);
+		if(neigh)
+		{
+			//printk("@!@neigh=%s\n",neigh->dev->name);
+			out_dev = getbrport_bydst(default_br_dev,neigh->ha);
+			if(out_dev)
+			{
+				//printk("@!@out_dev=%s\n",out_dev->name);
+				skb_push(skb, ETH_HLEN);
+				eth = (struct ethhdr*)(skb->data);
+				memcpy(eth->h_source, out_dev->dev_addr, ETH_ALEN);
+				memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+				//printk("@!@mac=%02x %02x %02x %02x %02x %02x\n",eth->h_dest[0],eth->h_dest[1],eth->h_dest[2],eth->h_dest[3],eth->h_dest[4],eth->h_dest[5]);
+			}
+			neigh_release(neigh);
+		}
+		if(out_dev == NULL)
+		{
+			printk("@!@dev: br port not found\n");
+			dev_put(default_route_dev);
+			dev_put(default_br_dev);
+			return 0;
+		}
+		fast_tcpdump(skb);
+		skb->dev = out_dev;
+	}
+	else
+	{
+		dev_put(default_route_dev);
+		dev_put(default_br_dev);
+		return 0;
+	}
+		
+	eth->h_proto = htons(ETH_P_IP);
+	skb->now_location |= FASTNAT_SUCC;
+	dev_queue_xmit(skb);
+	
+	dev_put(default_route_dev);
+	dev_put(default_br_dev);
+	return 1;
+}
+
+int fast_for_multicast(struct sk_buff *skb)
+{
+	if (skb->indev && !strncmp(skb->indev->name, default_route_name, IFNAMSIZ-1))
+	{
+		struct net_device* dev = NULL;
+		struct net_bridge *br;
+		struct net_bridge_port *p;
+		
+		dev = dev_get_by_name(&init_net, default_br_name);
+		if (dev == NULL || !(dev->priv_flags & IFF_EBRIDGE))
+		{
+			printk("@!@dev: br not found\n");
+			return 0;
+		}
+		br = (struct net_bridge *)netdev_priv(dev);
+		p = br_get_port(br, 1);
+		if(p && p->dev)
+		{
+			struct ethhdr *eth;
+			struct iphdr *iph = ip_hdr(skb);
+			
+			skb_push(skb, ETH_HLEN);
+			eth = (struct ethhdr *)skb->data;
+			memcpy(eth->h_source, p->dev->dev_addr, ETH_ALEN); 
+			ip_eth_mc_map(iph->daddr, eth->h_dest);
+			eth->h_proto = htons(ETH_P_IP);
+			skb->dev = p->dev;
+			skb->now_location |= FASTNAT_SUCC;
+			dev_queue_xmit(skb);
+			dev_put(dev);
+			return 1;
+		}
+		dev_put(dev);
+	}
+	return 0;
+}
+#endif
+
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+	struct net_bridge_fdb_entry *f;
+	struct net_bridge_port *p;
+
+	ASSERT_RTNL();
+
+	p = br_port_get_rtnl(dev);
+	if (!p)
+		return;
+
+	spin_lock_bh(&p->br->hash_lock);
+	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
+		if (f->dst == p && f->key.vlan_id == vid)
+			clear_bit(BR_FDB_OFFLOADED, &f->flags);
+	}
+	spin_unlock_bh(&p->br->hash_lock);
+}
+EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
diff --git a/upstream/linux-5.10/net/core/SI/net_other.c b/upstream/linux-5.10/net/core/SI/net_other.c
new file mode 100755
index 0000000..a6748c7
--- /dev/null
+++ b/upstream/linux-5.10/net/core/SI/net_other.c
@@ -0,0 +1,1222 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include "../../bridge/br_private.h"
+#include <net/arp.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/netioctl.h>
+#include <net/SI/errno_track.h>
+#include <net/ipv6.h>
+#include <net/SI/net_other.h>
+#include <linux/if_arp.h>
+
+#ifdef NETLINK_UC 
+#include "../../../../drivers/net/fast6/fast6.h"
+#include "../../../../drivers/net/fastnat/fastnat.h"
+#else
+#include <net/SI/fastnat.h>
+#include <net/SI/fast6.h>
+#endif
+#ifdef CONFIG_SPEED_OPT
+extern size_t skb_sys_pool_size(const void *ptr);
+#endif
+unsigned long check_pkt = 0;
+EXPORT_SYMBOL(check_pkt);
+int set_print_pkt = 0;  /* switch: print packet contents */
+
+/* Record packet insert/unlink (in/out) info */
+struct check_pkt_info skb_insert_info = {0};
+EXPORT_SYMBOL(skb_insert_info);
+struct check_pkt_info skb_unlink_info = {0};
+EXPORT_SYMBOL(skb_unlink_info);
+
+/* Walk past the MAC (and VLAN/PPPoE) headers to the ARP/IPv4 header; returns the network-layer protocol via *protocol without modifying the skb */
+static unsigned char* skip_mac_header(struct sk_buff *skb, unsigned short *protocol)
+{
+    __be16 next_pro;
+    unsigned char *curr_ptr = NULL;
+
+    if(skb_mac_header_was_set(skb))
+    {
+        curr_ptr = skb_mac_header(skb);
+        curr_ptr += ETH_HLEN;
+        next_pro = *(__be16 *)(curr_ptr - 2);
+    }
+    else
+    {
+        curr_ptr = skb->data + ETH_HLEN;
+        next_pro = *(__be16 *)(curr_ptr - 2);
+    }
+
+again:
+    if (htons(ETH_P_IP) == next_pro || htons(ETH_P_ARP) == next_pro)
+    {
+        *protocol = ntohs(next_pro);
+        return curr_ptr;
+    }
+    //vlan
+    else if (next_pro == cpu_to_be16(ETH_P_8021Q))
+    {
+        curr_ptr += VLAN_HLEN;
+        next_pro = *((__be16 *)(curr_ptr - 2));
+        goto again;
+    }
+    //pppoe 
+    else if (next_pro == htons(ETH_P_PPP_SES))
+    {
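+        /* PPPoE session: PPP protocol 0x0021 is IPv4 (0x0057 is IPv6);
+         * both fall through as IP here, since only IPv4/ARP survive the
+         * protocol check above anyway. */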
+        if (*(curr_ptr + 6) == 0x00 && (*(curr_ptr + 7) == 0x21 || *(curr_ptr + 7) == 0x57))
+        {
+            next_pro = htons(ETH_P_IP);
+            curr_ptr += PPPOE_HEADER_LEN;
+            goto again;
+        }
+    }    
+    return NULL;
+}
+
+
+/* Parse the DHCP options field for the option with the given code; option overload is not handled */
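+/* Each option is a TLV: | code (1 byte) | len (1 byte) | data (len bytes) |.
+ * Code 0 is a one-byte pad with no length field and code 255 ends the list;
+ * on a match the returned pointer addresses the option's data bytes. */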
+unsigned char *dhcp_option_get(unsigned char *data, int data_len, int code)
+{
+	unsigned char *opt_ptr;
+	int len;
+
+	opt_ptr = data;
+	while (1) {
+		if (data_len <= 0)
+			return NULL;
+
+		if (opt_ptr[0] == DHCP_PADDING) 
+        {
+			data_len--;
+			opt_ptr++;
+			continue;
+		}
+		if (opt_ptr[0] == DHCP_END)
+			return NULL;
+        
+		len = 2 + opt_ptr[1];
+		data_len -= len;
+		if (data_len < 0)
+			return NULL;
+
+		if (opt_ptr[0] == code)
+			return opt_ptr + 2;
+
+		opt_ptr += len;
+	}
+
+	return NULL;
+}
+
+
+void print_check_pkt_info(struct check_pkt_info *pkt_info, int num)
+{
+    int i = 0;
+    
+    num = num > MAX_PKT_NUM ? MAX_PKT_NUM : num;
+    printk("\n%10s %10s %10s\n", "Protocol", "MsgType", "Time");
+    for(i = 0; i < num; i++)
+    {
+        printk("%10s %10d %10lu\n", 
+            proto_str[pkt_info->info[i].proto_type], 
+            pkt_info->info[i].msg_type, 
+            pkt_info->info[i].time);
+    }
+}
+EXPORT_SYMBOL(print_check_pkt_info);
+
+/* Check whether the packet is one of the watched types and record the time;
+ * pkt_info returns the parsed protocol details */
+int check_packet_type(struct sk_buff *skb, struct pkt_info *pkt_info)
+{
+    struct iphdr *ip_hdr = NULL;
+    struct icmphdr *icmphdr = NULL;
+    struct udphdr *udp_hdr = NULL;
+    struct arphdr *arp_hdr = NULL;
+    unsigned char *data_ptr = NULL;
+    unsigned char *opt_ptr = NULL;
+    unsigned short data_len = 0;
+    unsigned short protocol = 0;
+
+    if(0 == check_pkt)
+        return 0;
+
+    memset(pkt_info, 0, sizeof(struct pkt_info));
+
+    /* Skip the MAC header to reach the ARP/IPv4 header */
+    data_ptr = skip_mac_header(skb, &protocol);
+    if(NULL == data_ptr)
+        return 0;
+
+    if(ETH_P_ARP == protocol)
+    {
+        if(test_bit(PKT_TYPE_ARP_BIT, &check_pkt))
+        {
+            arp_hdr = (struct arphdr *)data_ptr;
+            pkt_info->proto_type = PROTO_TYPE_ARP;
+            pkt_info->msg_type   = ntohs(arp_hdr->ar_op);
+            pkt_info->time       = jiffies;
+            return 1;
+        }
+    }
+    else if(ETH_P_IP == protocol)
+    {   
+        ip_hdr = (struct iphdr *)data_ptr;
+
+        /* For fragmented packets, only the first fragment is inspected */
+        if(ntohs(ip_hdr->frag_off) & IP_OFFSET)
+        {
+            return 0;
+        }
+
+        data_len = ntohs(ip_hdr->tot_len);
+
+        switch(ip_hdr->protocol)
+        {
+            case IPPROTO_UDP:
+                udp_hdr = (struct udphdr *)((unsigned char *)ip_hdr + ip_hdr->ihl * 4);
+                if(test_bit(PKT_TYPE_DHCP_BIT, &check_pkt))
+                {
+                    if((DHCP_CLIENT_PORT == ntohs(udp_hdr->source) && DHCP_SERVER_PORT == ntohs(udp_hdr->dest)) || 
+                        (DHCP_CLIENT_PORT == ntohs(udp_hdr->dest) && DHCP_SERVER_PORT == ntohs(udp_hdr->source)))
+                    {
+                        /* Skip the UDP header */
+                        data_ptr = (unsigned char *)udp_hdr + 8;
+                        /* Skip the fixed BOOTP/DHCP header */
+                        data_ptr += 236;
+                        /* Skip the magic cookie */
+                        data_ptr += 4;
+                        data_len = data_len - ip_hdr->ihl * 4 - 8 - 236 - 4;
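+                        /* Offsets: ihl*4 bytes of IP header, 8 bytes of
+                         * UDP header, 236 bytes of fixed BOOTP header and
+                         * the 4-byte magic cookie (0x63825363) precede the
+                         * TLV options. */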
+
+                        /* Fetch the DHCP message type option */
+                        opt_ptr = dhcp_option_get(data_ptr, data_len, DHCP_MSG_TYPE);
+                        if(opt_ptr)
+                            pkt_info->msg_type = opt_ptr[0];
+
+                        pkt_info->proto_type = PROTO_TYPE_DHCP;
+                        pkt_info->time = jiffies;
+
+                        return 1;
+                    }
+                }
+                break;
+                
+            case IPPROTO_TCP:
+                break;
+            case IPPROTO_ICMP:
+                icmphdr = (struct icmphdr *)((unsigned char *)ip_hdr + ip_hdr->ihl * 4);
+                if(test_bit(PKT_TYPE_PING_BIT, &check_pkt))
+                {
+                    if(ICMP_ECHOREPLY == icmphdr->type || ICMP_ECHO == icmphdr->type)
+                    {
+                        pkt_info->proto_type = PROTO_TYPE_PING;
+                        pkt_info->msg_type   = icmphdr->type;
+                        pkt_info->time       = jiffies;
+                        return 1;
+                    }
+                }
+                break;
+            default:
+                break;
+        }
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL(check_packet_type);
+
+
+#ifdef CONFIG_NETCTL
+
+void net_print_packet(unsigned char *data, unsigned int len, int flag)
+{
+    int i = 0;
+
+    if(set_print_pkt && net_ratelimit())
+    {
+        if(0 == flag)
+            printk("\nrecv packet:\n");
+        else if(1 == flag)
+            printk("\nsend packet:\n");
+        else
+            printk("\nprint packet:\n");
+
+        for(i = 0; i < len; i++)
+        {
+            if(i % 16 == 0)
+                printk("\n");
+            printk("%2x ", data[i]);
+        }
+    }
+}
+
+
+void track_netlink(struct sk_buff *skb,u32 group)
+{
+	struct nlmsghdr *nlh;	
+
+	nlh = (struct nlmsghdr*)(skb->data);	
+	net_run_track(PRT_RTNL_SEND,"rtnetlink_send,msg_type =%d;group = %d",nlh->nlmsg_type,group);
+}
+
+
+/* Compare a MAC address against every net_device address.
+ * A match can mean:
+ * 1. data loopback;
+ * 2. traffic is otherwise normal but two CPEs share the same MAC address.
+ */
+void check_macaddr_only(unsigned char *ha, unsigned char ha_len)
+{
+	struct net_device *dev;
+	unsigned char addr_len = 0;
+	unsigned char addr[ETH_ALEN] = {0};
+	
+	if(0 == addr_check)
+	{
+		return;
+	}
+	
+	read_lock(&dev_base_lock);
+
+	for_each_netdev(&init_net, dev)
+	{
+		if(dev->addr_len != ha_len)
+		{
+			// Reason for this check: sit0 defaults to addr_len=4 with an all-zero dev_addr; if another device's MAC bytes [0:3] are also all zero, the panic below would falsely trigger.
+			continue;
+		}
+		//addr_len = dev->addr_len > ha_len ? ha_len : dev->addr_len;
+		addr_len = ha_len;
+		if((addr_len > 0) && !memcmp(dev->dev_addr, ha, addr_len))
+		{
+			addr_len = addr_len > ETH_ALEN ? ETH_ALEN : addr_len;
+			memcpy(addr, ha, addr_len);
+			
+			panic("check_macaddr_only: mac address of pc is same as the device, dev name: %s, mac %x:%x:%x:%x:%x:%x\n", 
+				dev->name, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);		
+			read_unlock(&dev_base_lock);
+			return;
+		}
+	}
+
+	read_unlock(&dev_base_lock);
+
+	return;
+}
+
+
+void skbinfo_add(unsigned char *addr,unsigned int skbinfo_type)
+{
+	struct sk_buff *skb;
+	skbinfo_dbg[skbinfo_type] ++;
+/*
+	if(skb_max_panic && skbinfo_type == SKB_TYPE_ALL)
+	{
+		// Not protected by a spinlock here; if this feature is ever enabled it needs locking
+		if(skbinfo_dbg[SKB_TYPE_ALL] > skb_max_panic)
+			panic("too many skbs are allocated, please check for a data leak");
+	}
+*/
+	if(skbinfo_type == SKB_TYPE_DATA)
+	{
+		skb = (struct sk_buff *)addr;
+        //if(skb->isExtern == 0)
+		//    skbinfo_dbg[SKB_DATA_BYTES] += ksize(skb->head);
+        //else
+            skbinfo_dbg[SKB_DATA_BYTES] += skb->data - skb->head + skb->len;
+	}
+	if(skbinfo_type==(skb_info_track&0X7F)) 
+	{ 
+		printk("net resource monitor!!!");   
+		dump_stack(); 
+		if(skb_info_track&0X80)
+			panic("net team dbg panic !!!");
+	} 
+	
+}
+
+
+void skbinfo_del(unsigned char *addr,unsigned int skbinfo_type)
+{
+	struct sk_buff *skb;
+	skbinfo_dbg[skbinfo_type] --;
+	
+	if(skbinfo_type == SKB_TYPE_DATA)
+	{
+		skb = (struct sk_buff *)addr;
+        //if(skb->isExtern == 0)
+		//    skbinfo_dbg[SKB_DATA_BYTES] -= ksize(skb->head);
+        //else
+            skbinfo_dbg[SKB_DATA_BYTES] -= skb->data - skb->head + skb->len;
+	}
+
+}
+
+void netruninfo_add(unsigned char *addr,unsigned int info_type)
+{
+	netruninfo_dbg[info_type] ++;	
+	if(info_type==(net_info_track&0X7F)) 
+	{ 
+		printk("net resource monitor!!!");   
+		dump_stack(); 
+		if(net_info_track&0X80)
+			panic("net team dbg panic !!!");
+	} 
+}
+
+void netruninfo_del(unsigned char *addr,unsigned int info_type)
+{
+	netruninfo_dbg[info_type] --;
+}
+static int filter(void *start1, void *start2, unsigned int len)
+{
+    int i = 100, ret = -1;
+    int *p = start1, *q = start2;
+
+    do {
+        if (!memcmp(p, q, len)) {
+            ret = 0;
+            break;
+        }
+        ++p;
+    }while(i--);
+
+    return ret;
+}
+
+// Validate skb->network_header and return a usable IP header pointer (NULL if none can be derived)
+unsigned char * check_skb_for_dump(struct sk_buff *skb, int *mark)
+{
+    unsigned char * mac_head, *net_head, *tsp_head;
+    //dri->net 0; net->dri 1;
+    mac_head = skb_mac_head(skb);
+    net_head = skb_network_head(skb);
+    tsp_head = skb_transport_head(skb);
+    *mark = 0;
+    if (!skb->dev){
+   //     printk("skb->dev = NULL err in %s.\n", __func__);
+        return NULL;
+    }
+    if (skb->data == NULL){
+        printk("skb->data = NULL err in %s.\n", __func__);
+        return NULL;
+    }
+    if (skb->data == mac_head){
+        if(net_head && net_head < (mac_head + skb->dev->hard_header_len )){
+            *mark = -2;
+            return mac_head + skb->dev->hard_header_len;
+        }
+        *mark = 2;
+         return mac_head + skb->dev->hard_header_len;  
+    }
+    else if(skb->data == net_head){
+        if(mac_head && net_head < (mac_head + skb->dev->hard_header_len )){
+            *mark = -23;
+            return mac_head + skb->dev->hard_header_len;
+        }
+        if(tsp_head && net_head < (tsp_head - 20)){
+            *mark = -3;
+            return net_head;
+        }
+        *mark = 3;
+        return net_head;
+    }
+    else if(skb->data == tsp_head ){
+        if((!net_head) || (net_head && net_head < (tsp_head - 20))){
+            *mark = -4;
+            return NULL;
+        }
+        *mark = 4;
+        return net_head;
+    }
+    else{
+     //   printk("unexpected err in %s\n", __func__);
+        return NULL;
+    }
+        
+}
+
+extern struct nf_conntrack_tuple tuple_info;
+extern int getconn_type;
+
+// At skb_release_data time, match the skb contents against the configured tuple and dump a stack trace
+void dump_net_stack(struct sk_buff *skb, unsigned int offset)
+{
+    int mark = 0;
+    struct iphdr *iphv4;
+    struct ipv6hdr *iphv6;
+    unsigned char *tsp_start = NULL;
+    unsigned char *iph;
+
+    if(getconn_type != 8 && getconn_type != 10)
+        return;
+
+    iph = check_skb_for_dump(skb, &mark);
+    
+    if(getconn_type == 10 && iph){
+        if (skb->now_location & FASTNAT_SUCC){
+            struct tcphdr *th = (struct tcphdr *)(iph + 20); /* assumes a 20-byte IPv4 header */
+
+            printk("skb->len = %d now_location = %d\n", skb->len, skb->now_location);
+            printk("th->seq=%u, th->sport=%u, th->dport=%u\n", ntohl(th->seq), ntohs(th->source), ntohs(th->dest));
+            goto out;
+        }
+        return;
+    }
+    if(iph){
+        if((iph[0] & 0xf0) == 0x40){
+            iphv4 = (struct iphdr*)iph;
+            if(tuple_info.dst.protonum && tuple_info.dst.protonum != iphv4->protocol){
+                return;
+            }
+            if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.ip, &iphv4->daddr, 4) != 0){
+                return;
+            }
+            if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.ip, &iphv4->saddr, 4) != 0){
+                return;
+            }
+            tsp_start = (unsigned char*)iphv4 + (iphv4->ihl << 2);
+        }
+        else if((iph[0] & 0xf0) == 0x60){
+            iphv6 = (struct ipv6hdr*)iph;
+            if(tuple_info.dst.protonum && tuple_info.dst.protonum != iphv6->nexthdr){
+                return;
+            }
+            if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.in6, &iphv6->daddr, 16) != 0){
+                return;
+            }
+            if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.in6, &iphv6->saddr, 16) != 0){
+                return;
+            }
+            tsp_start = (unsigned char*)iphv6 + 40;
+        }
+    }
+    // If only a transport header is present at free time (no network header), compare ports only
+    if(mark == -4){
+        tsp_start = skb->data; 
+    }
+    if(tsp_start == NULL)
+        return;
+    
+    if(tuple_info.src.u.all && memcmp(&tuple_info.src.u.all, tsp_start, 2) != 0){
+        return;
+    }
+    if(tuple_info.dst.u.all && memcmp(&tuple_info.dst.u.all, tsp_start + 2, 2) != 0) {
+        return;
+    }
+    
+    printk("free skb match mark = %d:\n", mark);
+    if(tuple_info.dst.protonum && mark != -4)
+        printk("protonum = %d ",tuple_info.dst.protonum);
+    if(tuple_info.src.u3.ip && mark != -4){
+        if(iph && (iph[0] & 0xf0) == 0x40){
+            printk("sip: %08x ", ntohl(tuple_info.src.u3.ip));
+        }else if(iph && (iph[0] & 0xf0) == 0x60){
+            printk("sip: %x:%x:%x:%x:%x:%x:%x:%x ", ntohs(tuple_info.src.u3.in6.s6_addr16[0]), ntohs(tuple_info.src.u3.in6.s6_addr16[1]), ntohs(tuple_info.src.u3.in6.s6_addr16[2]), ntohs(tuple_info.src.u3.in6.s6_addr16[3]), 
+                    ntohs(tuple_info.src.u3.in6.s6_addr16[4]), ntohs(tuple_info.src.u3.in6.s6_addr16[5]), ntohs(tuple_info.src.u3.in6.s6_addr16[6]), ntohs(tuple_info.src.u3.in6.s6_addr16[7]));
+        }
+    }
+    if(tuple_info.src.u.all){
+        printk("sport : %d ", ntohs(tuple_info.src.u.all));
+    }
+    if(tuple_info.dst.u3.ip && mark != -4){
+        if(iph && (iph[0] & 0xf0) == 0x40){
+            printk("%dip: %08x ", ntohl(tuple_info.dst.u3.ip));
+        }else if(iph && (iph[0] & 0xf0) == 0x60){
+            printk("dip: %x:%x:%x:%x:%x:%x:%x:%x ", ntohs(tuple_info.dst.u3.in6.s6_addr16[0]), ntohs(tuple_info.dst.u3.in6.s6_addr16[1]), ntohs(tuple_info.dst.u3.in6.s6_addr16[2]), ntohs(tuple_info.dst.u3.in6.s6_addr16[3]), 
+                    ntohs(tuple_info.dst.u3.in6.s6_addr16[4]), ntohs(tuple_info.dst.u3.in6.s6_addr16[5]), ntohs(tuple_info.dst.u3.in6.s6_addr16[6]), ntohs(tuple_info.dst.u3.in6.s6_addr16[7]));
+        }
+    }
+    if(tuple_info.dst.u.all) {
+        printk("dport : %d ", ntohs(tuple_info.dst.u.all));
+    }
+    printk("\n");
+   // if (skb_dump_len) 
+      //  if(!filter((skb->head + offset), skb_dump_str, skb_dump_len))
+out:
+    dump_stack();
+}
+
+
+
+/***********************************************************************************************************/
+/* The interfaces below were added by the networking team; they are too tightly coupled to split into a    */
+/* separate source file, but they must be clearly commented.                                               */
+/***********************************************************************************************************/
+
+extern int set_tcpdump;
+extern char br_name[];
+
+// Requires skb->mac_header to be set already
+unsigned int get_network_head_len(struct sk_buff *skb)
+{
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+    unsigned char *buf = (unsigned char *)skb->head + skb->mac_header + ETH_HLEN;
+#else
+    unsigned char *buf = (unsigned char *)skb->mac_header + ETH_HLEN;
+#endif
+
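+    /* The high nibble of the first byte past the Ethernet header is the IP
+     * version. Only the fixed header length is returned: IPv4 options and
+     * IPv6 extension headers are not accounted for here. */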
+    if ((((unsigned)buf[0]) & 0xF0) == 0x40)
+        return 20; // IPv4: 20-byte base header
+
+    if ((((unsigned)buf[0]) & 0xF0) == 0x60)
+        return 40; // IPv6: 40-byte fixed header
+
+    return 20; // default to IPv4 handling
+}
+
+// Added by Sun Quan, modeled on __netif_receive_skb: captures custom receive-path packets (e.g. fastnat ingress) so tcpdump can be used normally; it costs performance, so it is gated by a proc variable
+void tcpdumpin_sq(struct sk_buff *skb)
+{
+    unsigned char        *data_priv;
+    struct packet_type *ptype;
+    __be16 type;
+    unsigned int len_priv;
+    int dev_flag = 0;
+
+    if (!(set_tcpdump & 1))
+        return;
+    
+    if (list_empty(&ptype_all))
+    {
+        return;
+    }
+
+    // Save the current frame header state; restored once capture is done
+    data_priv = skb->data;
+    len_priv = skb->len;
+    
+    // Point data/len at the MAC header, adjusting for whichever headers are currently set
+    if (skb->mac_header == 0 || skb->mac_header == ~0U)
+    {
+        skb_reset_mac_header(skb);
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+    }
+    else if (skb->network_header == 0 || skb->network_header == ~0U)
+    {
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+    else if (skb->transport_header == 0 || skb->transport_header == ~0U)
+    {
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+    
+    // When dev is NULL, set it to ptype->dev so every capture point still sees the packet
+    if (skb->dev == NULL)
+    {
+        dev_flag = 1;
+    }
+
+    rcu_read_lock();
+    list_for_each_entry_rcu(ptype, &ptype_all, list) {
+        // The ingress skb is a complete MAC frame; no offset adjustment is needed
+        if ((!ptype->dev || !skb->dev || ptype->dev == skb->dev))  // && (ptype->func == packet_rcv)
+        {
+            // packet_rcv requires a non-NULL skb->dev, hence this small trick; a few extra packets may be captured
+            if(skb->dev == NULL && ptype->dev)
+                skb->dev = ptype->dev;
+            else if(skb->dev == NULL)
+                skb->dev = __dev_get_by_name(&init_net, br_name);
+
+            refcount_inc(&skb->users); /* skb->users is a refcount_t on 5.10 */
+            //track_add(skb, 0, USER_INFO, 0);
+            ptype->func(skb, skb->dev, ptype, skb->dev);
+            if(dev_flag == 1)
+                skb->dev = NULL;
+        }
+    }
+    rcu_read_unlock();
+
+    // Restore the skb's initial state
+    skb->data = data_priv;
+    skb->len = len_priv;
+    if(dev_flag == 1)
+        skb->dev = NULL;
+}
+
+// Added by Sun Quan, modeled on dev_queue_xmit_nit: captures custom transmit-path packets (e.g. fastnat egress) so tcpdump can be used normally; it costs performance, so it is gated by a proc variable
+void tcpdumpout_sq(struct sk_buff *skb)
+{
+    struct packet_type *ptype;
+    struct sk_buff *skb2 = NULL;
+    int dev_flag = 0;
+    sk_buff_data_t        transport_header;
+    sk_buff_data_t        network_header;
+    sk_buff_data_t        mac_header;
+    unsigned char        *data_priv;
+    unsigned int len_priv;
+    
+    if (!(set_tcpdump & 2))
+        return;
+    
+    if (list_empty(&ptype_all))
+    {
+        return;
+    }
+
+    // Save the current frame header state; restored once capture is done
+    data_priv = skb->data;
+    len_priv = skb->len;
+    transport_header = skb->transport_header;
+    network_header = skb->network_header;
+    mac_header = skb->mac_header;
+
+    if (skb->mac_header == 0 || skb->mac_header == ~0U)
+    {
+        skb_reset_mac_header(skb);
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+    }
+    else if (skb->network_header == 0 || skb->network_header == ~0U)
+    {
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+    else if (skb->transport_header == 0 || skb->transport_header == ~0U)
+    {
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+
+    // At the TCP layer the egress dev is not assigned yet and is forced to br0 here, so egress capture must not filter by dev or WAN-side packets are missed
+    if(skb->dev == NULL)
+    {
+        dev_flag = 1;
+    }
+    
+    rcu_read_lock();
+    list_for_each_entry_rcu(ptype, &ptype_all, list) {
+        // At the TCP/IP layer data does not point at the MAC header, but the offsets applied above make it do so, so full frames are captured; PPPoE frames still cannot be captured completely
+        if ((ptype->dev == skb->dev || !ptype->dev || !skb->dev) && (ptype->af_packet_priv != NULL) &&
+          (struct sock *)ptype->af_packet_priv != skb->sk) // && (ptype->func == packet_rcv)
+        {
+            // packet_rcv requires a non-NULL skb->dev, hence this small trick; a few extra packets may be captured
+            if(skb->dev == NULL && ptype->dev)
+                skb->dev = ptype->dev;
+            else if(skb->dev == NULL)
+                skb->dev = __dev_get_by_name(&init_net, br_name);
+            
+            skb2 = skb_clone(skb, GFP_ATOMIC);
+            if (!skb2)
+                break;
+            ptype->func(skb2, skb->dev, ptype, skb->dev);
+            
+            if (dev_flag == 1)
+                skb->dev = NULL;
+        }
+    }
+    rcu_read_unlock();
+
+    // Restore the skb's initial state
+    if(dev_flag == 1)
+        skb->dev = NULL;
+    skb->transport_header = transport_header;
+    skb->network_header = network_header;
+    skb->mac_header = mac_header;
+    skb->data = data_priv;
+    skb->len = len_priv;
+}
+
+
+
+
+/* Walk the neighbour table (arp_tbl) and collect entries whose device matches dev */
+void get_neigh_bydev(struct neigh_table *tbl, struct net_device *dev, struct dev_neigh_info *neigh_info)
+{
+    int i;
+    int len;
+    unsigned int neigh_num = 0;
+    struct neigh_hash_table *nht;
+
+    if(tbl->family != AF_INET && tbl->family != AF_INET6)
+        return;
+
+    rcu_read_lock_bh();
+    nht = rcu_dereference_bh(tbl->nht);
+
+    for(i = 0; i < (1 << nht->hash_shift); i++)
+    {
+        struct neighbour *neigh;
+
+        for(neigh = rcu_dereference_bh(nht->hash_buckets[i]); neigh != NULL; neigh = rcu_dereference_bh(neigh->next))
+        {
+            if(neigh->dev == dev)
+            {
+                len = tbl->key_len > MAX_IPADDR_LEN ? MAX_IPADDR_LEN : tbl->key_len;
+
+                memcpy(neigh_info->neigh_nod[neigh_num].ip_addr, neigh->primary_key, len);
+                neigh_info->neigh_nod[neigh_num].ip_len = len;
+                memcpy(neigh_info->neigh_nod[neigh_num].mac_addr, neigh->ha, MAX_MACADDR_LEN);
+                neigh_num++;
+                if(neigh_num >= 20)
+                    goto end;
+            }
+        }
+    }
+
+end:
+    neigh_info->num = neigh_num;
+
+    rcu_read_unlock_bh();
+
+    return;
+}
+
+/* Find the neighbour entry (IP address etc.) in arp_tbl from a remote neighbour's MAC address */
+void get_neigh_bymac(struct neigh_table *tbl,mac_addr *addr, struct neigh_info *info)
+{
+    int i;
+    int len;
+    struct neigh_hash_table *nht;
+
+    if(tbl->family != AF_INET && tbl->family != AF_INET6)
+        return;
+
+    nht = rcu_dereference_bh(tbl->nht);
+
+    for(i = 0; i < (1 << nht->hash_shift); i++)
+    {
+        struct neighbour *neigh;
+
+        for(neigh = rcu_dereference_bh(nht->hash_buckets[i]); neigh != NULL; neigh = rcu_dereference_bh(neigh->next))
+        {
+            if(ether_addr_equal(neigh->ha, addr->addr))
+            {
+                len = tbl->key_len > MAX_IPADDR_LEN ? MAX_IPADDR_LEN : tbl->key_len;
+
+                memcpy(info->ip_addr, neigh->primary_key, len);
+                info->ip_len = len;
+                memcpy(info->mac_addr, neigh->ha, MAX_MACADDR_LEN);
+                return;
+            }
+        }
+	}
+}
+
+//»ñȡij2²ãÇŵãÉ豸µÄÁÚ¾ÓÁбíÐÅÏ¢£¬Ïȸù¾Ý³ö¿ÚdevÕÒµ½ËùÓеÄÔ¶³ÌÁÚ¾ÓMACµØÖ·£¬ÔÙ¸ù¾ÝMACµØÖ·ÕÒµ½arp_tblÖеÄÁÚ¾ÓIPµØÖ·µÈÐÅÏ¢
+void getneigh_ofdev(struct net_device *dst_dev,struct dev_neigh_info  *neigh_info)
+{
+	int neigh_num = 0;
+	struct net_device *br_dev;
+	struct net_bridge *br;
+	struct net_bridge_fdb_entry *f;
+
+	br_dev = dev_get_by_name(&init_net, br_name);
+	if (!br_dev)
+		return;
+	br = netdev_priv(br_dev);
+
+	/* On 5.10 the FDB lives in br->fdb_list/fdb_hash_tbl rather than the
+	 * old br->hash[] buckets, so walk the flat list. */
+	spin_lock_bh(&br->hash_lock);
+	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
+		if (f->dst && f->dst->dev == dst_dev &&
+		    !test_bit(BR_FDB_LOCAL, &f->flags)) {
+			get_neigh_bymac(&arp_tbl, &f->key.addr,
+					neigh_info->neigh_nod + neigh_num);
+			neigh_num++;
+			if (neigh_num >= 20) /* capacity of neigh_nod[], as in get_neigh_bydev() */
+				break;
+		}
+	}
+	spin_unlock_bh(&br->hash_lock);
+	neigh_info->num = neigh_num;
+	dev_put(br_dev);
+}
+
+void update_brport_info(struct devlist_info *dev_info){
+    int i = 0, j = 0, k = 0;
+    struct net_device *br_dev;
+    struct net_device *temp_dev;
+    struct dev_neigh_info *temp_neigh;
+    int temp_count ;
+    for(i = 0; i < dev_info->num; i++){
+        if(dev_info->info[i].dev_layer == BR_DEV){
+            br_dev = dev_get_by_name(&init_net, dev_info->info[i].name);
+            for(j = 0; j < dev_info->info[i].dev_neigh.num; j++){
+                temp_dev = getbrport_bydst(br_dev, dev_info->info[i].dev_neigh.neigh_nod[j].mac_addr);
+                if(!temp_dev || temp_dev->ifindex == br_dev->ifindex){
+                    printk("temp_dev error!!!\n");
+                    continue;
+                }        
+                for(k = 0 ; k < dev_info->num; k++){
+                    if(strcmp(dev_info->info[k].name, temp_dev->name) == 0){
+						temp_neigh = &(dev_info->info[k].dev_neigh);
+						if(temp_neigh->num >= 20){
+							printk("dev=%s , neigh info is full!\n", temp_dev->name);
+						    break;
+						}
+                        temp_count = temp_neigh->num;			
+                        memcpy(temp_neigh->neigh_nod[temp_count].ip_addr, dev_info->info[i].dev_neigh.neigh_nod[j].ip_addr,  dev_info->info[i].dev_neigh.neigh_nod[j].ip_len);
+                        temp_neigh->neigh_nod[temp_count].ip_len = dev_info->info[i].dev_neigh.neigh_nod[j].ip_len;
+                        memcpy(temp_neigh->neigh_nod[temp_count].mac_addr, dev_info->info[i].dev_neigh.neigh_nod[j].mac_addr, MAX_MACADDR_LEN);
+                        temp_count ++;
+                        temp_neigh->num = temp_count;
+                        break;
+                    }
+                }
+            }
+			if(br_dev)
+				dev_put(br_dev);
+        }
+    }
+}
+
+/* Collect info for the network devices in init_net: IP, MAC, egress dev, neighbour list, etc. */
+int get_devlist_info(unsigned long arg)
+{
+    struct devlist_info *dev_info;
+    struct net_device *dev;
+    struct net_device *temp_dev;
+    struct dev_neigh_info *temp_neigh;
+    unsigned int temp_count = 0;
+    unsigned int dev_num = 0;
+	
+    dev_info = kzalloc(sizeof(struct devlist_info), GFP_KERNEL);
+    if(!dev_info)
+        return -ENOMEM;
+
+    read_lock(&dev_base_lock);
+
+    for_each_netdev(&init_net, dev)
+    {
+        if(dev->flags & IFF_UP && strcmp(dev->name, "lo") != 0)
+        {
+        	// Record the local interface's own info
+            strcpy(dev_info->info[dev_num].name, dev->name);
+            if(dev->ip_ptr && dev->ip_ptr->ifa_list)
+                dev_info->info[dev_num].ipv4_addr = dev->ip_ptr->ifa_list->ifa_address; 
+            if(dev->header_ops && dev->dev_addr){
+                memcpy(dev_info->info[dev_num].mac_addr, dev->dev_addr, dev->addr_len);
+                if(is_zero_ether_addr(dev->dev_addr))
+                    dev_info->info[dev_num].mac_errtype = ZERO_ADDRERR;
+                else if(is_broadcast_ether_addr(dev->dev_addr))
+                    dev_info->info[dev_num].mac_errtype = BROADCAST_ADDRERR;
+                else if(is_multicast_ether_addr(dev->dev_addr))
+                    dev_info->info[dev_num].mac_errtype = MULTICAST_ADDRERR;              
+            }
+
+		// Below: refresh the remote-neighbour list associated with this interface.
+		// For bridge-port devices, first find all remote neighbour MACs by egress dev, then resolve each to an IP via arp_tbl.
+            if(dev->priv_flags & IFF_BRIDGE_PORT){
+		    	dev_info->info[dev_num].dev_layer = L2_DEV;
+                dev_info->info[dev_num].dev_neigh.num = 0;
+		//	getneigh_ofdev(dev,&(dev_info->info[dev_num].dev_neigh));
+            }
+		// br0 is the special gateway device; fetch its neighbour list
+            else if(dev->priv_flags & IFF_EBRIDGE){
+                dev_info->info[dev_num].dev_layer = BR_DEV;
+                get_neigh_bydev(&arp_tbl, dev,&(dev_info->info[dev_num].dev_neigh));
+            }
+		// For ordinary layer-3 devices, fetch the neighbour list
+            else{
+                // Skip interfaces that do not do ARP (IFF_NOARP)
+                if(!(dev->flags & IFF_NOARP))
+                    get_neigh_bydev(&arp_tbl, dev,&(dev_info->info[dev_num].dev_neigh));
+                dev_info->info[dev_num].dev_layer = L3_DEV;         
+            }
+		    dev_num++;
+            if(dev_num >= MAX_DEV_NUM)
+            {
+                break;
+            }
+        }
+    }
+    dev_info->num = dev_num;
+    update_brport_info(dev_info);
+    read_unlock(&dev_base_lock);
+    
+    if (copy_to_user((char *)arg, dev_info, sizeof(struct devlist_info)))
+    {
+        kfree(dev_info);
+        return -EFAULT;
+    }
+    kfree(dev_info);
+
+    return 0;
+}
+
+extern wait_queue_head_t skb_wait_queue;
+//extern atomic_t  skb_used;
+extern atomic_t  skb_tops;
+extern atomic_t  skb_fromps;
+// Track allocation of skb structs, including data passed in from outside
+void skb_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	netslab_inc(SKB_SLAB);
+	//track_add(skb, 0, SKB_INFO, skb->truesize);
+	skbinfo_add(skb,SKB_TYPE_ALL);
+#endif
+//	atomic_inc(&skb_used);
+}
+// Track freeing of skb structs, including data passed in from outside
+void skb_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	track_del(skb, 0, SKB_INFO);
+	skbinfo_del(skb,SKB_TYPE_ALL);
+	netslab_dec(SKB_SLAB);	
+#endif
+	//2017.6.3  add by linxu  set a limit for skb
+//	atomic_dec(&skb_used);
+	if(waitqueue_active(&skb_wait_queue))
+	{
+		wake_up(&skb_wait_queue);		
+	}
+}
+// Track slab-based skb->data allocations; external PSBUF is excluded
+void skbdata_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	skbinfo_add((unsigned char *)skb,SKB_TYPE_DATA);
+	//track_add(skb->head, 0, DATA_INFO, skb->len);
+#endif
+}
+// Track slab-based skb->data frees; external PSBUF is excluded
+void skbdata_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	track_del(skb->head, 0, DATA_INFO);
+	skbinfo_del(skb,SKB_TYPE_DATA);
+#endif
+}
+
+// Track allocations of externally supplied data
+void fromext_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	//track_add(skb, 0, DATA_INFO,  skb->len);
+	skbinfo_add((unsigned char *)skb,SKB_TYPE_FROMCP);
+#endif
+	atomic_inc(&skb_fromps);
+}
+// Track frees of externally supplied data
+void fromext_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	track_del(skb->head, 0, DATA_INFO);
+	skbinfo_del(skb,SKB_TYPE_FROMCP);
+#endif	
+	atomic_dec(&skb_fromps);
+}
+// Track allocations sent out to the external side
+void toext_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	skbinfo_add((unsigned char *)skb,SKB_TYPE_TOCP);
+#endif
+	atomic_inc(&skb_tops);
+}
+// Track frees of buffers sent out to the external side
+void toext_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	skbinfo_del(skb,SKB_TYPE_TOCP);
+#endif
+	atomic_dec(&skb_tops);
+}
+#else
+
+
+
+int set_print = 0;       // switch for custom network prints
+EXPORT_SYMBOL(set_print);
+
+int set_tcpdump = 0;  // switch for packet capture at arbitrary points
+
+// Per-skb statistics, including some anomaly counters
+unsigned long skbinfo_dbg[SKB_INFO_MAX]= {0};
+
+// Statistics for network runtime resources, especially allocations of key structs, for internal study and debugging
+unsigned long netruninfo_dbg[NET_INFO_MAX]= {0};
+
+// Global caps on skb allocation; currently unused
+unsigned long skb_max_panic = 0; // cap on total skbs; exceeding it panics
+unsigned long skb_num_limit = 6000;  // cap on total skbs; exceeding it makes allocation return NULL
+
+// Stack-trace matching skbs at their free point
+char skb_dump_str[NIOCTL_MAX_MSGLEN] = {0};
+unsigned int skb_dump_len = 0;
+
+/* Statistics on local TCP */
+unsigned long tcp_stats_dbg[TCP_STATS_MAX] = {0};
+
+
+// Traffic-model counters, used to profile the packet mix
+int  skb_num4 = 0;                  // received IPv4 packets
+int  skb_num6 = 0;                  // received IPv6 packets
+int  skb_big_num;                   // packets with len > 1000, IPv4 and IPv6
+int  skb_small_num;                 // packets with len < 100, IPv4 and IPv6
+int  skb_bytes4 = 0;                // received IPv4 bytes
+int  skb_bytes6 = 0;                // received IPv6 bytes
+int  skb_unknown = 0;               // received packets of unknown protocol, e.g. ARP and other non-v4/v6 frames
+int  skb_tcpnum = 0;                // received TCP packets, IPv4 and IPv6, excluding fastbr frames
+int  skb_udpnum = 0;                // received UDP packets, IPv4 and IPv6, excluding fastbr frames
+int  broadcast_num4 = 0;            // received IPv4 broadcasts
+int  broadcast_num6 = 0;            // received IPv6 broadcasts
+int  multicast_num4 = 0;            // received IPv4 multicasts
+int  multicast_num6 = 0;            // received IPv6 multicasts
+int  fastnat_num = 0;               // packets forwarded successfully by fastnat
+int  fast6_num = 0;                 // packets forwarded successfully by fast6
+int  fastbr_num = 0;                // packets forwarded successfully by fastbr
+int  fast_local4_rcv_num = 0;       // packets received locally via fast_local4
+int  fast_local6_rcv_num = 0;       // packets received locally via fast_local6
+int  fast_local4_output_num = 0;    // packets sent locally via fast_local4
+int  fast_local6_output_num = 0;    // packets sent locally via fast_local6
+int  fast_tcpdump_num = 0;          // packets captured on the fast path
+
+
+int double_mac = 0; // MAC check switch
+// slab memory usage statistics; generic slab pools such as kmalloc are not counted
+struct slab_info slab_count = {0};
+
+/* Bump a TCP statistic */
+#define TCP_PKT_STATS_INC(_mod)    tcp_stats_dbg[_mod]++
+
+void dump_net_stack(struct sk_buff *skb, unsigned int offset)
+{
+   
+}
+
+
+void check_macaddr_only(unsigned char *ha, unsigned char ha_len)
+{
+	
+}
+
+// Track allocation of skb structs, including data passed in from outside
+void skb_alloc_track(struct sk_buff *skb)
+{
+	
+}
+// Track freeing of skb structs, including data passed in from outside
+void skb_free_track(struct sk_buff *skb)
+{
+	
+}
+// Track slab-based skb->data allocations; external PSBUF is excluded
+void skbdata_alloc_track(struct sk_buff *skb)
+{
+	
+}
+// Track slab-based skb->data frees; external PSBUF is excluded
+void skbdata_free_track(struct sk_buff *skb)
+{
+	
+}
+
+// Track allocations of externally supplied data
+void fromext_alloc_track(struct sk_buff *skb)
+{
+	
+
+}
+// Track frees of externally supplied data
+void fromext_free_track(struct sk_buff *skb)
+{
+	
+}
+// Track allocations sent out to the external side
+void toext_alloc_track(struct sk_buff *skb)
+{
+	
+}
+// Track frees of buffers sent out to the external side
+void toext_free_track(struct sk_buff *skb)
+{
+	
+}
+
+void net_print_packet(unsigned char *data, unsigned int len, int flag)
+{
+   
+}
+
+
+void tcpdumpin_sq(struct sk_buff *skb)
+{
+    
+}
+
+void track_netlink(struct sk_buff *skb,u32 group)
+{
+
+}
+
+void netslab_inc(int i)
+{
+
+}
+
+void netslab_dec(int i)
+{
+
+}
+
+void netruninfo_add(unsigned char *addr,unsigned int info_type)
+{
+}
+
+void netruninfo_del(unsigned char *addr,unsigned int info_type)
+{
+}
+
+void skbinfo_add(unsigned char *addr,unsigned int skbinfo_type)
+{
+}
+
+void skbinfo_del(unsigned char *addr,unsigned int skbinfo_type)
+{
+}
+
+int net_debug_packet = 0;
+struct timeval net_debug_packet_tv = {0, 0};
+struct list_head net_debug_packet_list_head; 
+int net_debug_packet_sec = 0;
+
+#if 0
+// Record application packet-sending activity
+void record_app_atcive_net()
+{
+
+}
+#endif
+int get_tcp_stat_info(unsigned long arg)
+{
+    return 0;
+}
+#endif
+
+#ifdef _USE_TestHarness 
+int *vir_addr_ddrnet = 0;
+
+void psnet_freepsbuf(void *head)
+{
+}
+#endif
\ No newline at end of file
diff --git a/upstream/linux-5.10/net/core/fastproc/fast4_fw.c b/upstream/linux-5.10/net/core/fastproc/fast4_fw.c
new file mode 100755
index 0000000..0ef32e4
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast4_fw.c
@@ -0,0 +1,829 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast_common.h>
+#include <net/inet_hashtables.h>
+#include <linux/igmp.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+
+#include <linux/netfilter/xt_tcpudp.h>
+
+MODULE_LICENSE("GPL");
+
+
+static struct task_struct *ct_iptables_syn;
+static struct tasklet_struct ct_iptables_bh;
+unsigned int ct_iptables_syn_sw;
+enum table_index {
+	IPTABLE_RAW,
+	IPTABLE_MANGLE,
+	IPTABLE_NAT,
+	IPTABLE_FILTER
+};
+
+
+/* ***************** IPv4 fast-forwarding variables ********************************/
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern int *vir_addr_ddrnet;
+#endif
+/* ******************************** function declarations ********************************/
+
+
+/* ******************************** function implementations ********************************/
+int dst_expire_count = 0;
+extern int no_neighbour;
+
+static inline int rt_is_expired(struct rtable *rth)
+{
+    return rth->rt_genid != atomic_read(&(dev_net(rth->dst.dev))->ipv4.rt_genid);
+}
+
+void __flush_dcache_area(void *addr, size_t len)
+{
+      // Stub function; how it will be used is to be decided later
+}
+
+
+extern void ntl_ct_set_iw(struct sk_buff *skb, struct nf_conn *ct, int ct_dir);
+int fast4_fw_recv(struct nf_conn *tmpl,
+                  struct sk_buff *skb,
+                  struct nf_conn *ct,
+                  struct nf_conntrack_l4proto *l4proto,
+                  unsigned int dataoff,
+                  int dir,
+                  u_int8_t protonum)
+{
+    struct iphdr *iph = ip_hdr(skb);
+    struct udphdr *udph = NULL;
+    struct tcphdr *tcph = NULL;
+    __sum16 *cksum = NULL;
+    __be32 *oldip = NULL;
+    __be16 *oldport = NULL;
+    struct net_device *dev = NULL;
+    u_int32_t skip_nat = 0;
+
+    enum ip_conntrack_info ctinfo;
+    int ret;
+    int rdir;
+    int type;
+    u_int32_t      nat_addr;
+    u_int16_t      nat_port;
+    struct ethhdr * eth;
+    struct dst_entry *dst_dir = NULL, *dst_rdir = NULL;
+    struct neighbour *_neighbour = NULL;
+	
+	__be16			vlan_proto_raw = skb->vlan_proto;
+	__u16			vlan_tci_raw = skb->vlan_tci;
+
+    /* Modeled on tcf_ipt_act() */
+    struct nf_hook_state state = {
+        .hook = NF_INET_PRE_ROUTING,
+        .net = &init_net,
+        .in	= skb->dev,
+        .pf	= NFPROTO_IPV4,
+    };
+
+
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+    dst_dir = dst_get_by_ct(ct, dir);
+
+    if (!dst_dir)
+    {
+        goto err_out;
+    }
+
+    if (rt_is_expired((struct rtable*)dst_dir))
+    {
+        dst_expire_count++;
+        fast_fw_conn_release(ct);
+        goto err_out;
+    }
+
+    // If a bridge port was recorded for this direction, use it directly
+    if (ct->fast_ct.fast_brport[dir])
+    {
+        rcu_read_lock();
+        dev = rcu_dereference_protected(ct->fast_ct.fast_brport[dir], 1);
+        rcu_read_unlock();
+    }
+    else {
+        dev = dst_dir->dev;
+    }
+
+    /* Check whether the packet exceeds the egress device's MTU */
+
+    if (!dev || (skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        goto err_out;
+    }
+
+    // Packets looping back out their ingress device are freed and counted
+    if (strcmp(skb->dev->name, dev->name) == 0)
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+
+        kfree_skb(skb);
+        goto drop_packet;
+    }
+
+    // Modeled on resolve_normal_ct
+    if (dir == 1) {
+        ctinfo = IP_CT_ESTABLISHED_REPLY;
+    } else {
+        if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+            ctinfo = IP_CT_ESTABLISHED;
+        } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+            ctinfo = IP_CT_RELATED;
+        } else {
+            ctinfo = IP_CT_NEW;
+        }
+    }
+
+
+    ret = nf_conntrack_handle_packet_fast(ct, skb, dataoff, ctinfo, &state);
+    if (ret <= 0) {
+        skb->_nfct = 0;
+        goto err_out; // the skb must stay unmodified until fast succeeds, or a failure would need rollback
+    }
+    //Åжϳö¿ÚdevµÄÍ·²¿¿Õ¼äÊÇ·ñ×ã¹»£¬²»¹»ÐèÒªexpand
+    if (!(skb = fast_expand_headroom(skb, dev))) {
+        goto drop_packet;
+    }
+
+    fast_tcpdump(skb);
+
+    // If capture hit, the data was cloned; fast success will modify data, so take a private copy
+    if (skb_cloned(skb))
+    {
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            print_sun(SUN_DBG, "fast4_fw_recv clone copy failed !!!\n");
+            kfree_skb(skb);
+            goto drop_packet;
+        }
+        clean_cache(skb->data,skb->len);
+    }
+
+    iph = ip_hdr(skb);
+
+    // Only once fastnat has succeeded may the network header be reset, for use when flushing the cache
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+    nf_ct_set(skb, (struct nf_conn *)&ct->ct_general, ctinfo);
+
+    if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
+    {
+        if(IP_CT_DIR_ORIGINAL == dir)
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            type = FN_TYPE_SRC;
+        }
+        else
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            type = FN_TYPE_DST;
+        }
+    }
+    else if (test_bit(IPS_DST_NAT_BIT, &ct->status))
+    {
+        if (IP_CT_DIR_ORIGINAL == dir)
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            type = FN_TYPE_DST;
+        }
+        else
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            type = FN_TYPE_SRC;
+        }
+    }
+    else
+    {
+        skip_nat = 1;
+    }
+
+    if (!skip_nat)
+    {
+        /* Perform the NAT rewrite */
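+        /* Addresses/ports are rewritten in place and the checksums patched
+         * incrementally (RFC 1624): inet_proto_csum_replace4/2 adjust the
+         * TCP/UDP checksum for the address and port change, csum_replace4
+         * adjusts the IP header checksum, and a zero UDP checksum (meaning
+         * "no checksum") is left untouched. */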
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+            cksum = &tcph->check;
+            oldport = (FN_TYPE_SRC == type)? (&tcph->source): (&tcph->dest);
+        }
+        else if (IPPROTO_UDP == iph->protocol)
+        {
+            udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+            cksum = &udph->check;
+            oldport = (FN_TYPE_SRC == type)? (&udph->source): (&udph->dest);
+        }
+
+        oldip = (FN_TYPE_SRC == type)? (&iph->saddr) : (&iph->daddr);
+
+        if (cksum != NULL && (0!=*cksum || IPPROTO_TCP == iph->protocol))
+        {
+            inet_proto_csum_replace4(cksum, skb, *oldip, nat_addr, 0);
+            inet_proto_csum_replace2(cksum, skb, *oldport, nat_port, 0);
+        }
+        csum_replace4(&iph->check, *oldip, nat_addr);
+        if(oldport)
+            *oldport = nat_port;
+        *oldip = nat_addr;
+    }
+    else
+    {
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+        }
+    }
+
+    // Per-connection (ct) traffic accounting
+    ct->packet_info[dir].packets++;
+    ct->packet_info[dir].bytes += skb->len;
+    // Per-interface traffic accounting --- like native Linux drivers, counted as IP packet length
+    if (fastnat_level == FAST_NET_DEVICE)
+    {
+        skb->dev->stats.rx_packets++;
+        skb->dev->stats.rx_bytes += skb->len;
+    }
+
+
+    if (dev->flags & IFF_UP)
+    {
+        if (!(dev->flags & IFF_POINTOPOINT)) {
+            skb_push(skb, ETH_HLEN);
+			skb_reset_mac_header(skb);
+			if(skb->isvlan == 1)
+			{
+				struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr*)(skb->data - VLAN_HLEN);
+				skb->vlan_proto = vlan_eth->h_vlan_proto;
+				skb->vlan_tci = ntohs(vlan_eth->h_vlan_TCI);				
+			}
+            eth = (struct ethhdr *)skb->data;
+            _neighbour = dst_neigh_lookup_skb(dst_dir, skb);
+            // Use the egress dev's MAC as the packet's source MAC
+            memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+            if (_neighbour)
+            {
+                memcpy(eth->h_dest, _neighbour->ha, ETH_ALEN);
+                neigh_release(_neighbour);
+            }
+            else {
+                __flush_dcache_area(skb->data, skb->len);
+                kfree_skb(skb);
+                no_neighbour++;
+                goto drop_packet;
+            }
+            eth->h_proto = htons(ETH_P_IP);
+        }
+        skb->dev = dev;
+        skb->now_location |= FASTNAT_SUCC;
+
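+        /* Record the ingress/egress devices on first use; the iptables sync
+         * thread matches them against rule -i/-o interface filters. */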
+        if (ct->indev[dir] == NULL && skb->indev != NULL)
+        {
+            ct->indev[dir] = skb->indev;
+        }
+
+        if (ct->outdev[dir] == NULL && skb->dev != NULL)
+        {
+            ct->outdev[dir] = skb->dev;
+        }
+
+        /* restore the VLAN tag saved at function entry */
+        skb->vlan_proto = vlan_proto_raw;
+        skb->vlan_tci = vlan_tci_raw;
+        __flush_dcache_area(skb->data, skb->len);
+        spin_unlock_bh(&fast_fw_spinlock);
+        dev_queue_xmit(skb);
+        spin_lock_bh(&fast_fw_spinlock);
+    }
+    else
+    {
+        print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+
+    print_sun(SUN_DBG, "skb : 0x%x, new fastnat succ--------", skb);
+
+succ_out:
+drop_packet:
+    if (tmpl)
+        nf_conntrack_put(&tmpl->ct_general);
+    dst_release(dst_dir);
+    return 1;
+
+err_out :
+    dst_release(dst_dir);
+    nf_conntrack_put(&ct->ct_general);
+    print_sun(SUN_DBG, "skb : 0x%x, new fastnat FAIL!!!!!!!!!!", skb);
+    if (tmpl) {
+        nf_ct_set(skb, (struct nf_conn *)&tmpl->ct_general, IP_CT_NEW);
+    }
+    else {
+        skb->_nfct = 0;
+    }
+    return 0; /* not fast nat */
+}
+
+/* Runs at the POST_ROUTING hook; records the fast-forwarding data for the connection */
+unsigned int napt_handle4_fw(void *priv,
+                             struct sk_buff *skb,
+                             const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    enum ip_conntrack_dir dir, rdir;
+    struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+    struct neighbour *_neighbour = NULL;
+    struct net_device *out = state->out;
+
+
+    /* global fast-forwarding switch */
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    /* Only TCP and UDP support the fast path. An ICMP packet (e.g. port
+     * unreachable) can look up a TCP/UDP ct, so check the protocol explicitly. */
+    if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+    {
+        return NF_ACCEPT;
+    }
+    /* per-feature fast-forwarding bitmap switch */
+
+    if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch)
+            || !test_bit(FAST_TYPE_FW4_BIT, &fast_switch) )
+    {
+        return NF_ACCEPT;
+    }
+
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+
+    /* no fast link for broadcast or multicast */
+    if (ipv4_is_multicast(ip_hdr(skb)->daddr) || ipv4_is_lbcast(ip_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    if(!dst)
+    {
+        return NF_ACCEPT;
+    }
+
+    _neighbour = dst_neigh_lookup_skb(dst, skb);
+    if (!_neighbour)
+    {
+        return NF_ACCEPT;
+    }
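+    /* An all-zero hardware address means the neighbour is unresolved, so the
+     * Ethernet header cannot be built yet and no fast link is set up, except
+     * on point-to-point devices, which carry no MAC. */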
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+    {
+        if (!(skb->dev->flags & IFF_POINTOPOINT))
+            goto accept;
+    }
+
+    /* check whether the packet length exceeds the egress device's MTU */
+    if (dst->dev && (skb->len > dst->dev->mtu))
+    {
+        goto accept;
+    }
+
+    ct = nf_ct_get(skb, &ctinfo);
+
+    if (!ct)
+    {
+        goto accept;
+    }
+
+    protocol = nf_ct_protonum(ct);
+
+    if (ct->master == NULL)
+    {
+        struct nf_conn_help *temp_help = nfct_help(ct);
+        /* A connection with a helper hook attached must stay on the standard
+         * Linux path; otherwise the kernel cannot see the packet data it needs. */
+        if(temp_help!=NULL)
+        {
+            goto accept;
+        }
+    }
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+    //¹ýÂ˲»ÐèÒª¾­¹ýfastnatµÄЭÒé°ü,¸ù¾Ý¶Ë¿ÚºÅ½øÐйýÂË
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+
+    /* fastnat applies to TCP/UDP only */
+    if (IPPROTO_TCP == protocol)
+    {
+        /* TCP three-way handshake completed */
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+            goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    spin_lock_bh(&fast_fw_spinlock);
+    if (ct->fast_ct.fast_dst[dir] && (ct->fast_ct.fast_dst[dir] != dst))
+    {
+        fast_fw_conn_release(ct);
+    }
+
+    if (!ct->fast_ct.fast_dst[dir])
+    {
+        rcu_assign_pointer(ct->fast_ct.fast_dst[dir], dst);
+        ct->fast_ct.fast_brport[dir] = getBridgePort(_neighbour, out);
+        fast_dst_add_ct(dst, ct);
+    }
+
+    ct->fast_ct.isFast = FAST_CT_FW4;
+    spin_unlock_bh(&fast_fw_spinlock);
+
+accept:
+
+    neigh_release(_neighbour);
+    return NF_ACCEPT;
+}
+
+static struct nf_hook_ops fast4_fw_hook = {
+    .hook = napt_handle4_fw,
+    //.owner = THIS_MODULE,
+    .pf = PF_INET,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP_PRI_LAST,
+};
+
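+/*
+ * The helpers below replay a conntrack entry against the iptables rule set so
+ * that rule counters stay accurate for traffic that bypasses netfilter on the
+ * fast path. ip_packet_match() checks a rule in the flow's own direction;
+ * ip_packet_match_neg() checks it with src/dst addresses and in/out
+ * interfaces swapped, so reply-direction traffic is matched against the
+ * original rule.
+ */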
+static inline bool
+port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
+{
+	return (port >= min && port <= max) ^ invert;
+}
+
+static bool ctable_mt(struct nf_conn* ct, struct xt_action_param *par,int dir, int* match_filter)
+{
+	const struct xt_tcp *tcpinfo = par->matchinfo;
+
+	if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
+			ntohs(ct->tuplehash[dir].tuple.src.u.tcp.port),
+			!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
+		return false;
+	if((tcpinfo->spts[0] || tcpinfo->spts[1]) ^ (!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
+		*match_filter = 1;
+	if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
+			ntohs(ct->tuplehash[dir].tuple.dst.u.tcp.port),
+			!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
+		return false;
+	if((tcpinfo->dpts[0] || tcpinfo->dpts[1]) ^ (!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
+		*match_filter = 1;
+	return true;
+}
+
+static inline bool
+ip_packet_match(const struct ipt_ip *ipinfo,struct nf_conn* ct,int dir, int* match_filter)
+{
+	unsigned long ret = 0;
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+		    (ct->tuplehash[dir].tuple.src.u3.ip & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+		    (ct->tuplehash[dir].tuple.dst.u3.ip & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+		return false;
+	if(ipinfo->src.s_addr || ipinfo->dst.s_addr)
+		*match_filter = 1;
+
+	if(ct->indev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->indev[dir]->name, ipinfo->iniface, ipinfo->iniface_mask);
+	}
+	
+	if(ipinfo->iniface[0] != '\0')
+		*match_filter = 1;
+		
+	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
+		return false;
+	if(ct->outdev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->outdev[dir]->name, ipinfo->outiface, ipinfo->outiface_mask);
+	}
+
+	if(ipinfo->outiface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
+		return false;
+
+	if (ipinfo->proto &&
+	    NF_INVF(ipinfo, IPT_INV_PROTO, ct->tuplehash[dir].tuple.dst.protonum != ipinfo->proto))
+		return false;
+
+	return true;
+}
+
+static inline bool
+ip_packet_match_neg(const struct ipt_ip *ipinfo,struct nf_conn* ct,int dir, int* match_filter)
+{
+	unsigned long ret = 0;
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+		    (ct->tuplehash[dir].tuple.dst.u3.ip & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+		    (ct->tuplehash[dir].tuple.src.u3.ip & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+		return false;
+	if(ipinfo->src.s_addr || ipinfo->dst.s_addr)
+		*match_filter = 1;
+
+	if(ct->outdev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->outdev[dir]->name, ipinfo->iniface, ipinfo->iniface_mask);
+	}
+	
+	if(ipinfo->iniface[0] != '\0')
+		*match_filter = 1;
+		
+	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
+		return false;
+	if(ct->indev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->indev[dir]->name, ipinfo->outiface, ipinfo->outiface_mask);
+	}
+
+	if(ipinfo->outiface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
+		return false;
+
+	if (ipinfo->proto &&
+	    NF_INVF(ipinfo, IPT_INV_PROTO, ct->tuplehash[dir].tuple.dst.protonum != ipinfo->proto))
+		return false;
+
+	return true;
+}
+
+static inline struct ipt_entry *
+get_entry(const void *base, unsigned int offset)
+{
+	return (struct ipt_entry *)(base + offset);
+}
+
+static inline
+struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
+{
+	return (void *)entry + entry->next_offset;
+}
+
+
+
+/* Sync ct statistics into iptables; the sync period is tentatively 1 s */
+static int ct_iptables_syn_thread(void *param)
+{
+	/* exit when fast4_fw_cleanup() calls kthread_stop(); an endless loop
+	 * would block kthread_stop() forever */
+	while (!kthread_should_stop())
+	{
+		if (ct_iptables_syn_sw)
+		{
+			rcu_read_lock();
+			tasklet_schedule(&ct_iptables_bh);
+			rcu_read_unlock();
+		}
+		/* sync interval: 1 s */
+		msleep(1*1000);
+	}
+	return 0;
+}
+
+void ct_iptables_syn_handle(struct nf_conn *ct,struct xt_table_info *private,int table_id)
+{
+	void *table_base;
+	struct ipt_entry *e = NULL;
+	const struct xt_entry_match *ematch;
+	struct xt_action_param acpar;
+	struct xt_counters *counter;
+	int match_flag = 0;
+	int match_filter = 0;
+	int num = 0;
+
+
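+	/*
+	 * Walk every rule from the table's first relevant hook entry point and
+	 * add the connection's cached byte/packet counts to each rule whose
+	 * filter it matches: table ids 0-2 start at PRE_ROUTING, table id 3 at
+	 * LOCAL_IN. Rules with no concrete filter (match_filter unset) are
+	 * skipped so catch-all entries are not inflated.
+	 */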
+	table_base = private->entries;
+	num = private->number;
+	switch(table_id)
+	{
+		case 0:
+		case 1:
+		case 2:
+			e = get_entry(table_base, private->hook_entry[NF_INET_PRE_ROUTING]);
+			break;
+		case 3:
+			e = get_entry(table_base, private->hook_entry[NF_INET_LOCAL_IN]);
+			break;
+		default:
+			/* unknown table id: bail out, e would otherwise stay uninitialized */
+			return;
+	}
+	while(num--)
+	{
+		match_flag = 0;
+		match_filter = 0;
+		if(!ip_packet_match(&e->ip, ct, IP_CT_DIR_ORIGINAL,&match_filter) &&
+			!ip_packet_match_neg(&e->ip, ct, IP_CT_DIR_REPLY,&match_filter))
+		{
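+			/* no ORIGINAL-direction match; fall through to the REPLY-direction check below */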
+		}
+		else
+		{
+			xt_ematch_foreach(ematch, e) 
+			{
+				acpar.matchinfo = ematch->data;
+				if (!ctable_mt(ct, &acpar, IP_CT_DIR_ORIGINAL,&match_filter))
+				{
+					match_flag = 1;
+					break;
+				}
+				else
+				{
+				}
+			}
+			if(!match_flag)
+			{
+				if(match_filter)
+				{
+					counter = xt_get_this_cpu_counter(&e->counters);
+					ADD_COUNTER(*counter, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes,ct->packet_info[IP_CT_DIR_ORIGINAL].packets);
+				}
+				e = ipt_next_entry(e);
+				continue;
+			}
+			match_flag = 0;
+			match_filter = 0;
+		}
+		
+		if (!ip_packet_match(&e->ip, ct, IP_CT_DIR_REPLY,&match_filter) &&
+			!ip_packet_match_neg(&e->ip, ct, IP_CT_DIR_ORIGINAL,&match_filter))
+		{
+			e = ipt_next_entry(e);
+			continue;
+		}
+		else
+		{
+			xt_ematch_foreach(ematch, e) 
+			{
+				acpar.matchinfo = ematch->data;
+				if (!ctable_mt(ct, &acpar, IP_CT_DIR_REPLY,&match_filter))
+				{
+					match_flag = 1;
+					break;
+				}
+			}
+			if(!match_flag)
+			{
+				if(match_filter)
+				{
+					counter = xt_get_this_cpu_counter(&e->counters);
+					ADD_COUNTER(*counter, ct->packet_info[IP_CT_DIR_REPLY].bytes,ct->packet_info[IP_CT_DIR_REPLY].packets);
+				}
+				e = ipt_next_entry(e);
+				continue;
+			}
+		}
+	}
+
+}
+
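+/* Tasklet body: walk the whole conntrack hash, push the cached counters of
+ * every fast-path connection into the raw/mangle/nat/filter tables, then
+ * zero the per-connection counts so each delta is only added once. */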
+static void ct_iptables_bhfunc(unsigned long param)
+{
+	int hash = 0;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	struct nf_conn *ct;
+	struct xt_table_info *private;
+	struct net * net;
+	unsigned int addend;
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
+	for(hash = 0; hash < nf_conntrack_htable_size; hash++)
+    {
+	   	hlist_nulls_for_each_entry_rcu(h,n,&nf_conntrack_hash[hash],hnnode)
+		{
+	   		if(h)
+			{
+	   			ct = nf_ct_tuplehash_to_ctrack(h);
+				if(ct->fast_ct.isFast)
+				{
+					net = nf_ct_net(ct);
+					private = READ_ONCE(net->ipv4.iptable_raw->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_RAW);
+					private = READ_ONCE(net->ipv4.iptable_mangle->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_MANGLE);
+					private = READ_ONCE(net->ipv4.nat_table->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_NAT);
+					private = READ_ONCE(net->ipv4.iptable_filter->private);	
+					ct_iptables_syn_handle(ct,private,IPTABLE_FILTER);
+				}
+				else
+					continue;
+				spin_lock_bh(&fast_fw_spinlock);
+				ct->packet_info[IP_CT_DIR_ORIGINAL].bytes = 0;
+				ct->packet_info[IP_CT_DIR_ORIGINAL].packets = 0;
+				ct->packet_info[IP_CT_DIR_REPLY].bytes = 0;
+				ct->packet_info[IP_CT_DIR_REPLY].packets = 0;
+				spin_unlock_bh(&fast_fw_spinlock);
+			}
+	   	}
+    }
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+}
+
+
+int fast4_fw_init(void)
+{
+    int ret = 0;
+
+    ret = nf_register_net_hook(&init_net, &fast4_fw_hook);
+    if (ret != 0)
+    {
+        print_sun(SUN_ERR,"init fast4_fw_init failed\n");
+        return -EINVAL;
+    }
+    print_sun(SUN_DBG,"init fast4_fw_init done\n");
+	
+	ct_iptables_bh.func = ct_iptables_bhfunc;
+	ct_iptables_syn = kthread_create(ct_iptables_syn_thread, (void *)0, "ct_iptables_syn");
+    if (!IS_ERR(ct_iptables_syn))
+    {
+        printk("ct_iptables_syn thread created\n");
+        wake_up_process(ct_iptables_syn);
+    }
+    else
+    {
+        /* don't leave an ERR_PTR behind for fast4_fw_cleanup() to kthread_stop() */
+        ct_iptables_syn = NULL;
+    }
+
+    return 0;
+}
+
+int fast4_fw_cleanup(void)
+{
+    fast_release_all(RELEASE_ALL_DST);
+    nf_unregister_net_hook(&init_net, &fast4_fw_hook);
+	if (ct_iptables_syn) 
+	{
+    	kthread_stop(ct_iptables_syn);
+    	ct_iptables_syn = NULL;
+	}
+	tasklet_kill(&ct_iptables_bh);
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast6.c b/upstream/linux-5.10/net/core/fastproc/fast6.c
new file mode 100755
index 0000000..d5fab9c
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast6.c
@@ -0,0 +1,626 @@
+/*
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ */
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast6.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/print_sun.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/SI/net_track.h>
+
+
+MODULE_LICENSE("GPL");
+
+/* ***************** IPv6 fast-forwarding variables ***********************/
+spinlock_t fast6_spinlock; /* spinlock protecting list operations */
+fast_list_t working_list6 = {0};
+struct hlist_nulls_head *working_hash6;
+
+/* ******************************** function declarations *****************/
+
+
+/* ******************************** function implementations **************/
+/* IPv6 extension-header types after which no parsable L4 header follows */
+static int ip6nol4head(int type)
+{
+    int i, count;
+    int optarray[] = {IPPROTO_ESP}; /* from debugging only ESP is known for now; extend after studying the protocols */
+
+    count = sizeof(optarray)/sizeof(optarray[0]);
+    for (i = 0; i < count; i++)
+    {
+        if (type == optarray[i])
+            return (1);
+    }
+    return (0);
+}
+
+/* check whether the type is an IPv6 extension header */
+static int ip6option(int type)
+{
+    int i, optarray[8] = {IPPROTO_HOPOPTS, IPPROTO_IPV6, IPPROTO_ROUTING, IPPROTO_FRAGMENT,
+            IPPROTO_ESP, IPPROTO_AH, IPPROTO_DSTOPTS, IPPROTO_NONE};
+    
+    for (i = 0; i < 8; i++)
+    {
+        if (type == optarray[i])
+            return(optarray[i]);
+    }
+    return (0);
+}
+
+//skb->dataÐèÒªÖ¸ÏòipÍ·
+/*Ìø¹ýIPV6Í·¼°À©Õ¹Í·£¬Ö¸ÏòL4Í·,²¢·µ»ØL4ЭÒéÀàÐÍ*/
+unsigned char *getipv6uppkg(unsigned char *ippkg, unsigned char *protocol, int *uppkglen)
+{
+    unsigned char *ippkgpos = ippkg + 40;
+    struct ip6_hdr *hdr = (struct ip6_hdr *)ippkg;
+    struct ip6_opthdr *opthdr;
+    int ip6hdrlen;
+    int proto = 0;
+
+    proto = ip6option(hdr->ip6_nxt);
+    if (proto)
+    {
+        return NULL;
+#if 0
+        if (ip6nol4head(proto))
+            return NULL;
+
+        opthdr =(struct ip6_opthdr *)ippkgpos;
+        while (proto = ip6option(opthdr->nxt))
+        {
+            if (ip6nol4head(proto))
+                return NULL;
+            ippkgpos += (opthdr->len + 1) << 3;
+            opthdr = (struct ip6_opthdr *)ippkgpos;
+        }
+        if (protocol)
+            *protocol = opthdr->nxt;
+        ippkgpos += (opthdr->len + 1) << 3;
+#endif
+    }
+    else
+        if (protocol)
+            *protocol = hdr->ip6_nxt;
+        
+    ip6hdrlen = ippkgpos - ippkg;
+    
+    if (uppkglen)
+        *uppkglen = ntohs(hdr->ip6_plen) + 40 - ip6hdrlen;
+    
+    return (ippkgpos);
+}
+
+/* Extract the IPv6 5-tuple; currently only TCP/UDP/ICMP packets are handled */
+int fast6_get_tuple(struct sk_buff *skb, struct nf_conntrack_tuple *tuple)
+{
+    __u8 next_hdr;
+    unsigned char *l4head;
+    struct ipv6hdr *iph;
+    struct udphdr *udph = NULL;
+    struct tcphdr *tcph = NULL;
+    struct icmp6hdr *icmph = NULL;
+
+    if (!skb || !tuple)
+    {
+        return -1;
+    }
+    iph = (struct ipv6hdr *)skb->data;
+
+
+    /* only IPv6 packets */    
+    if (htons(ETH_P_IPV6) != skb->protocol)
+    {
+        return -1;
+    }
+    
+    if (skb->len - sizeof(struct ipv6hdr) >= 0)
+    {
+        l4head = getipv6uppkg(skb->data, &next_hdr, NULL);
+        if (l4head == NULL)
+            return -1;
+    }
+    else
+        return -1;
+    
+    memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+    /* only tcp/udp */
+    if (NEXTHDR_UDP == next_hdr)
+    {
+        udph = (struct udphdr *)l4head;
+        tuple->src.u.udp.port = udph->source;
+        tuple->dst.u.udp.port = udph->dest;
+        skb_udpnum++;
+    }
+    else if (NEXTHDR_TCP == next_hdr)
+    {
+        tcph = (struct tcphdr *)l4head;
+        tuple->src.u.tcp.port = tcph->source;
+        tuple->dst.u.tcp.port = tcph->dest;
+        skb_tcpnum++;
+    }
+    else if (NEXTHDR_ICMP == next_hdr)
+    {
+        icmph = (struct icmp6hdr *)l4head; /* point to ICMPv4 header */
+        tuple->src.u.icmp.id = icmph->icmp6_identifier;
+        tuple->dst.u.icmp.type = icmph->icmp6_type;
+        tuple->dst.u.icmp.code = icmph->icmp6_code;
+    }
+    else
+    {
+        return -1;
+    }
+
+    tuple->src.l3num = AF_INET6;
+    tuple->src.u3.in6 = iph->saddr;
+    tuple->dst.u3.in6 = iph->daddr;
+    tuple->dst.protonum = next_hdr;
+    tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+    return 0;
+}
+
+/* This must stay consistent with dev_xmit_complete() */
+/* Being inline it cannot move to a shared file, so fastnat and fast6 each keep a copy */
+static inline bool start_xmit_complete(int rc)
+{
+    /*
+     * Positive cases with an skb consumed by a driver:
+     * - successful transmission (rc == NETDEV_TX_OK)
+     * - error while transmitting (rc < 0)
+     * - error while queueing to a different device (rc & NET_XMIT_MASK)
+     */
+    if (likely(rc < NET_XMIT_MASK))
+        return true;
+
+    return false;
+}
+
+/* Fast path for IPv6 packets: the hash is protected by RCU, the global connection list by a spinlock */
+int fast6_recv(struct sk_buff *skb)
+{
+    struct nf_conntrack_tuple tuple;
+    fast_entry_data_t *fast6_entry_data = NULL;
+    fast_entry_t *fast6_entry = NULL;
+    struct tcphdr *tcph = NULL;
+    struct net_device *dev = NULL;
+    __u8 next_hdr = 0;
+    unsigned char *l4head;
+    struct ipv6hdr *ip6;
+    
+    print_sun(SUN_DBG, "enter fast_6_recv \n");
+
+    if (fastnat_level == FAST_CLOSE)
+    {
+        return 0;
+    }
+    
+    if (fast6_get_tuple(skb, &tuple) < 0)
+    {
+        print_sun(SUN_DBG, "fast_6_recv get tuple err \n");
+        return 0;
+    }
+    
+    ip6 = ipv6_hdr(skb);
+    if (ip6->nexthdr != IPPROTO_TCP && ip6->nexthdr != IPPROTO_UDP)
+        return 0;
+    
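+    /* The lookup and every dereference of the entry below happen under
+     * rcu_read_lock(), since the fast6 hash is RCU-protected (see above). */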
+    rcu_read_lock();
+
+    fast6_entry_data = fast_find_entry_data(working_hash6, &tuple);
+    if (fast6_entry_data == NULL)
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv fast_6_find null \n");
+        return 0;
+    }
+    
+    /*Åжϱ¨Îij¤¶ÈÊÇ·ñ³¬¹ý³ö¿ÚDEVµÄMTU*/
+    dev = fast6_entry_data->outdev;
+    if (!dev || (skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv outdev err \n");
+        return 0;
+    }
+    
+    /* a packet looped back to its ingress device is freed immediately and counted */
+    if (strcmp(skb->dev->name, dev->name) == 0)
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+        rcu_read_unlock();
+
+        kfree_skb(skb);
+        printk("loopback skb, free skb\n");
+        return 1;
+    }
+
+    fast6_entry = fast_data_to_entry(fast6_entry_data);
+    if (!fast6_entry)
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv fast6_entry is null \n");
+        return 0;
+    }
+
+    /* Ö»Óе±Ë«Ïò¿ìËÙÁ´½Ó¶¼½¨Á¢³É¹¦²Å×ßFASTNAT£¬·ñÔò×ß±ê×¼Á÷³Ì */
+	/* udp²»ÐèҪ˫Ïò½¨Á´        */
+	if ((fast6_entry->flags != FAST_ALL_DIR) && (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP))
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv flags is not FAST_ALL_DIR \n");
+        return 0;
+    }
+
+    /* skip the IPv6 header and get the L4 header pointer */
+    l4head = getipv6uppkg(skb->data, &next_hdr, NULL);
+    if (l4head == NULL)
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv l4head is null \n");
+        return 0;
+    }
+
+
+
+    if (!(skb = fast_expand_headroom_v6(skb, dev))) {
+        rcu_read_unlock();
+        return 1;
+    }
+
+    fast_tcpdump(skb);
+    
+    /* If a packet capture matched, the skb was cloned; since the fast path modifies the data, make a private copy first */
+    if (skb_cloned(skb))
+    {
+        print_sun(SUN_DBG, "fast6_recv clone \n");
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            rcu_read_unlock();
+
+            print_sun(SUN_DBG, "fast6_recv clone copy failed !!!\n");
+            printk("pskb_expand_head skb failed, free skb\n");
+            kfree_skb(skb);
+            return 1;
+        }
+    }
+
+    /* Only once fast6 has succeeded may the IP header start be recorded, for use when the cache is flushed */
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+    skb->priority = fast6_entry_data->priority;
+    skb->mark = fast6_entry_data->mark;
+
+
+    /* the kernel's built-in per-connection traffic accounting */
+    struct nf_conn_counter *acct = (struct nf_conn_counter*)nf_conn_acct_find(fast6_entry->ct);
+    if (acct) {
+        enum ip_conntrack_info ctinfo;
+        if (fast6_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL)
+            ctinfo = IP_CT_ESTABLISHED;
+        else 
+            ctinfo = IP_CT_ESTABLISHED_REPLY;
+
+        atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+        atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
+    }
+	
+    /* ¶¨Öƹ¦ÄÜ£¬ÎªÁ˽â¾öµ¥UDP¹à°üʱ£¬ÎÞ·¨ÖªÏþindev½øÐÐÁ÷Á¿Í³¼ÆÎÊÌâ¶¨ÖÆ */
+    if ((fast6_entry_data->indev == NULL) && skb->dev)
+    {
+        fast6_entry_data->indev = skb->dev;
+    }
+
+    /* Count packets received on the ingress device --- as in the native Linux drivers, the IP packet length is counted */
+    if (fast6_entry_data->indev && (fastnat_level == FAST_NET_DEVICE))
+    {
+        fast6_entry_data->indev->stats.rx_packets++;
+        fast6_entry_data->indev->stats.rx_bytes += skb->len;
+    }  
+    
+    skb->dev = dev;
+
+    //Ö»Óе±ÓÐMACÍ·Ô¤¸³ÖµÊ±£¬²Å×¼¸³Öµ£¬·ñÔòΪIPÍ·
+    skb_push(skb, ETH_HLEN);
+
+    memcpy(skb->data, fast6_entry_data->hh_data, ETH_HLEN);
+    /* refresh the connection timeout */
+    if (IPPROTO_TCP == tuple.dst.protonum)
+    {
+        mod_timer(&fast6_entry->timeout, jiffies + tcp_timeouts[fast6_entry->ct->proto.tcp.state]);
+        tcph = (struct tcphdr *)l4head;
+        update_tcp_timeout(fast6_entry, fast6_entry_data, tcph);
+        fast6_entry->ct->timeout = jiffies + tcp_timeouts[fast6_entry->ct->proto.tcp.state];
+    }
+    else if (IPPROTO_UDP == tuple.dst.protonum)
+    {
+        /* udp */
+        if (test_bit(IPS_SEEN_REPLY_BIT, &fast6_entry->ct->status))
+        {
+            mod_timer(&fast6_entry->timeout, jiffies + fast_udp_timeout_stream);
+            fast6_entry->ct->timeout = jiffies + fast_udp_timeout_stream;
+        }
+        else
+        {
+            mod_timer(&fast6_entry->timeout, jiffies + fast_udp_timeout);
+            fast6_entry->ct->timeout = jiffies + fast_udp_timeout;
+        }
+    }
+
+    if (skb->dev->flags & IFF_UP)
+    {
+        /* PPP devices carry the bare IP packet, so strip the Ethernet header */
+        if (strncmp(skb->dev->name, ppp_name, strlen(ppp_name)) == 0)
+        {
+           skb_pull(skb, ETH_HLEN);
+        }
+        
+        skb->now_location |= FAST6_SUCC;
+        if (fastnat_level == FAST_NET_DEVICE)
+        {
+            print_sun(SUN_DBG, "fastnat-2 dev_queue_xmit, send to:%s !!!!!!!! \n", skb->dev->name);
+            dev_queue_xmit(skb);
+        }
+        /* Special applications such as traffic shaping must use the standard fastnat path, or their hooks cannot run */
+        else if (fastnat_level == FAST_NET_CORE)
+        {
+            dev_queue_xmit(skb);
+        }
+        /*add by jiangjing*/
+        fast6_entry_data->packet_num++;
+
+    }
+    else
+    {
+        print_sun(SUN_DBG, "fast6_recv ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+    rcu_read_unlock();
+
+    print_sun(SUN_DBG, "fast_6_recv okokok \n");
+    return 1;
+}
+
+static struct nf_hook_ops fast6_hook = {
+    .hook = napt6_handle,
+    //.owner = THIS_MODULE,
+    .pf = PF_INET6,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP6_PRI_LAST,
+};
+
+/* Runs at the POST_ROUTING hook; populates the fast link and adds it to the hash table */
+unsigned int napt6_handle(void* priv,
+            struct sk_buff *skb,
+            const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    fast_entry_t *fast6_entry;
+    fast_entry_data_t *fast6_entry_data;
+    enum ip_conntrack_dir dir, rdir;
+    struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+	struct neighbour *_neighbour = NULL;
+	struct net_device *out = state->out;
+
+
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+    {
+        return NF_ACCEPT;
+    }
+    
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+    
+    if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP && ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+        return NF_ACCEPT;
+
+    /* no fast link for multicast */
+    if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    /* Watch whether the fast link table fills up often; if it does, consider evicting the oldest entry */
+    if (working_list6.count > nf_conntrack_max)
+    {
+        return NF_ACCEPT;
+    }
+    /* check whether there is a next hop */
+    if (!dst)
+    { 
+        return NF_ACCEPT;
+    }
+    _neighbour = dst_neigh_lookup_skb(dst, skb);
+    if (!_neighbour)
+    {
+        return NF_ACCEPT;
+    }
+    
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+    {
+        goto accept;
+    }
+
+    if (!(ct = nf_ct_get(skb, &ctinfo)))
+    {
+        goto accept;
+    }
+    protocol = nf_ct_protonum(ct);
+
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+
+    //¹ýÂ˲»ÐèÒª¾­¹ýfastnatµÄЭÒé°ü
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+    
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+    
+    if (IPPROTO_TCP == protocol)
+    {
+        /* only established */
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+            goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    spin_lock_bh(&fast6_spinlock);
+    if (!(fast6_entry = fast_get_entry(&working_list6, ct, dir)))
+    {
+        spin_unlock_bh(&fast6_spinlock);
+        goto accept;
+    }
+    fast6_entry->fast_spinlock = &fast6_spinlock;
+
+    //Ê״ν¨Á´£¬»ñÈ¡ct¼ÆÊýËø£¬²¢É¾³ýct¶¨Ê±Æ÷£»Ê״ν¨Á´Öظ´°ü£¬²»ÄܲÙ×÷
+    if (!(fast6_entry->flags & FAST_ALL_DIR))
+    {
+        nf_conntrack_get(&ct->ct_general);
+        //del_timer(&ct->timeout);
+        ct->timeout = fast6_entry->timeout.expires;
+    }
+
+    fast6_entry_data = &fast6_entry->data[dir];
+    fast6_entry_data->tuplehash.tuple = ct->tuplehash[dir].tuple;
+
+    memcpy(fast6_entry_data->dmac, _neighbour->ha, ETH_ALEN);
+    fast6_entry_data->priority = skb->priority;
+    fast6_entry_data->mark = skb->mark;
+    fast6_entry_data->outdev = out;
+
+    if (!record_MAC_header(working_hash6, ct, fast6_entry, fast6_entry_data, _neighbour, out, htons(ETH_P_IPV6)))
+    {
+        spin_unlock_bh(&fast6_spinlock);
+        goto accept;
+    }
+
+    /* guarantees that the two per-direction flag bits do not conflict */
+    fast6_entry->flags = fast6_entry->flags | (1 << dir);
+    
+    fast_add_entry(working_hash6, fast6_entry_data);
+    
+    if (fast6_entry->flags == FAST_ALL_DIR)
+    {
+        fast6_entry->data[0].indev = fast6_entry->data[1].outdev;
+        fast6_entry->data[1].indev = fast6_entry->data[0].outdev;
+    }
+
+    spin_unlock_bh(&fast6_spinlock);
+
+    ct->fast_ct.isFast = FAST_CT_WND6;
+
+accept:
+	
+	neigh_release(_neighbour);
+    return NF_ACCEPT;
+}
+
+/* handle notifier-chain events */
+int fast6_event(traverse_command_t *cmd)
+{
+    spin_lock_bh(&fast6_spinlock);
+    traverse_process(&working_list6, cmd);
+    spin_unlock_bh(&fast6_spinlock);
+	return 0;
+}
+
+/* When fastnat_level is switched off, clear all IPv6 fast-forwarding state */
+void fast6_cleanup_links(void)
+{
+    spin_lock_bh(&fast6_spinlock);
+    fast_cleanup_links(&working_list6);
+    spin_unlock_bh(&fast6_spinlock);
+}
+
+int tsp_fast6_init(void)
+{
+    int ret;
+    
+    print_sun(SUN_DBG,"start init fast6\n");
+
+    working_hash6 = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, /*&fast6hash_vmalloc,*/ 1);
+    if (!working_hash6) 
+    {
+        print_sun(SUN_DBG, "Unable to create working_hash6\n");
+        return -EINVAL;
+    }
+
+    spin_lock_init(&fast6_spinlock);
+
+    ret = nf_register_net_hook(&init_net, &fast6_hook);
+    if (ret != 0)
+    {
+        print_sun(SUN_DBG,"init fast6 failed\n");
+        goto err;
+    }
+    
+    print_sun(SUN_DBG,"init fast6 done\n");
+    return 0;
+    
+err:
+    nf_ct_free_hashtable(working_hash6, /*fast6_hash_vmalloc, */nf_conntrack_htable_size);
+    return -EINVAL;
+}
+
+int tsp_fast6_cleanup(void)
+{
+    nf_unregister_net_hook(&init_net, &fast6_hook);
+    nf_ct_free_hashtable(working_hash6, /*fast6_hash_vmalloc,*/ nf_conntrack_htable_size);
+    
+    print_sun(SUN_DBG,"fast6 cleanup done\n");
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast6_fw.c b/upstream/linux-5.10/net/core/fastproc/fast6_fw.c
new file mode 100755
index 0000000..322175b
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast6_fw.c
@@ -0,0 +1,395 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast6.h>
+#include <net/SI/fast_common.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/ip6_fib.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+
+MODULE_LICENSE("GPL");
+
+/* ***************** IPv6 fast-forwarding variables ********************* */
+
+
+/* **************************** function declarations ******************* */
+
+
+/* **************************** function implementations **************** */
+extern u32 rt6_peer_genid(void);
+
+int dst_expire_count_v6 = 0;
+extern int no_neighbour;
+extern void ntl_ct_set_iw(struct sk_buff *skb, struct nf_conn *ct, int ct_dir);
+int fast6_fw_recv(struct nf_conn *tmpl,
+                  struct sk_buff *skb,
+                  struct nf_conn *ct,
+                  struct nf_conntrack_l4proto *l4proto,
+                  unsigned int dataoff,
+                  int dir,
+                  u_int8_t protonum)
+{
+    struct net_device *dev = NULL;
+    enum ip_conntrack_info ctinfo;
+    int ret;
+    int  rdir;
+    struct ethhdr * eth;
+    __u8 next_hdr = 0;
+    unsigned char *l4head;
+    struct dst_entry *dst_dir = NULL;
+    struct neighbour *_neighbour = NULL;
+
+    /* save the original VLAN tag so it can be restored before transmit */
+    __be16 vlan_proto_raw = skb->vlan_proto;
+    __u16 vlan_tci_raw = skb->vlan_tci;
+
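+    /* Minimal hook state handed to nf_conntrack_handle_packet_fast() below so
+     * the regular conntrack L4 state machine still runs on the fast path. */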
+    struct nf_hook_state state = {
+        .hook = NF_INET_PRE_ROUTING,
+        .net = &init_net,
+        .in = skb->dev,
+        .pf = NFPROTO_IPV6,
+    };
+
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL;
+    dst_dir = dst_get_by_ct(ct, dir);
+
+
+    /* TCP requires the link in both directions; one direction is enough for UDP */
+    if (!dst_dir)
+    {
+        goto err_out;
+    }
+
+    /* if a bridge port was recorded, send directly to that port */
+    if (ct->fast_ct.fast_brport[dir])
+    {
+        rcu_read_lock();
+        dev = rcu_dereference_protected(ct->fast_ct.fast_brport[dir], 1);
+        rcu_read_unlock();
+    }
+    else {
+        dev = dst_dir->dev;
+    }
+
+    /* check whether the packet length exceeds the egress device's MTU */
+    if (!dev || (skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        goto err_out;
+    }
+
+    /* a packet looped back to its ingress device is freed immediately and counted */
+    if (strcmp(skb->dev->name, dev->name) == 0)
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+        //nf_conntrack_put(&ct->ct_general);
+        kfree_skb(skb);
+        goto drop_packet;
+    }
+
+    if (dir == 1) {
+        ctinfo = IP_CT_ESTABLISHED_REPLY;
+    } else {
+        if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+            ctinfo = IP_CT_ESTABLISHED;
+        } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+            ctinfo = IP_CT_RELATED;
+        } else {
+            ctinfo = IP_CT_NEW;
+        }
+    }
+
+
+
+    ret = nf_conntrack_handle_packet_fast(ct, skb, dataoff, ctinfo, &state);
+    if (ret <= 0) {
+        skb->_nfct = 0;
+        goto err_out; /* the skb must stay unmodified until the fast path can no longer fail, or a failure would require undoing the changes */
+    }
+    //Åжϳö¿ÚdevµÄÍ·²¿¿Õ¼äÊÇ·ñ×ã¹»£¬²»¹»ÐèÒªexpand
+    if (!(skb = fast_expand_headroom_v6(skb, dev))) {
+        //nf_conntrack_put(&ct->ct_general);
+        goto drop_packet;
+    }
+
+    fast_tcpdump(skb);
+
+    /* If a packet capture matched, the skb was cloned; since the fast path modifies the data, make a private copy first */
+    if (skb_cloned(skb))
+    {
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            print_sun(SUN_DBG, "fast6_fw_recv clone copy failed !!!\n");
+            kfree_skb(skb);
+            goto drop_packet;
+        }
+        clean_cache(skb->data,skb->len);
+    }
+
+    /* Only once fastnat has succeeded may the IP header start be recorded, for use when the cache is flushed */
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+    nf_ct_set(skb, (struct nf_conn *)&ct->ct_general, ctinfo);
+
+
+    /* per-interface traffic statistics --- as in the native Linux drivers, the IP packet length is counted */
+    if (fastnat_level == FAST_NET_DEVICE)
+    {
+        skb->dev->stats.rx_packets++;
+        skb->dev->stats.rx_bytes += skb->len;
+    }
+
+    if (dev->flags & IFF_UP)
+    {
+        if (!(dev->flags & IFF_POINTOPOINT)) {
+            /* only non-PPP ports get an Ethernet header; otherwise the data stays an IP header */
+            skb_push(skb, ETH_HLEN);
+            skb_reset_mac_header(skb);
+            if (skb->isvlan == 1)
+            {
+                struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)(skb->data - VLAN_HLEN);
+                skb->vlan_proto = vlan_eth->h_vlan_proto;
+                skb->vlan_tci = ntohs(vlan_eth->h_vlan_TCI);
+            }
+            eth = (struct ethhdr *)skb->data;
+            /* use the egress dev MAC address as the packet's source MAC */
+            memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+            _neighbour = dst_neigh_lookup_skb(dst_dir, skb);
+            if (_neighbour)
+            {
+                memcpy(eth->h_dest, _neighbour->ha, ETH_ALEN);
+                neigh_release(_neighbour);
+            }
+            else {
+                __flush_dcache_area(skb->data, skb->len);
+                kfree_skb(skb);
+                no_neighbour++;
+                goto drop_packet;
+            }
+
+            eth->h_proto = htons(ETH_P_IPV6);
+        }
+        skb->dev = dev;
+        skb->now_location |= FASTNAT_SUCC;
+		skb->vlan_proto = vlan_proto_raw;
+		skb->vlan_tci = vlan_tci_raw;
+        __flush_dcache_area(skb->data, skb->len);
+        spin_unlock_bh(&fast_fw_spinlock);
+        dev_queue_xmit(skb);
+		spin_lock_bh(&fast_fw_spinlock);
+    }
+    else
+    {
+        print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+
+    print_sun(SUN_DBG, "skb : 0x%x, fast6_fw succ--------", skb);
+
+succ_out:
+drop_packet:
+    if (tmpl)
+        nf_conntrack_put(&tmpl->ct_general);
+    dst_release(dst_dir);
+    return 1;
+
+err_out :
+    dst_release(dst_dir);
+
+    nf_conntrack_put(&ct->ct_general);
+    print_sun(SUN_DBG, "skb : 0x%x, fast6_fw fail!!!!!!!!!!", skb);
+    if (tmpl) {
+        nf_ct_set(skb, (struct nf_conn *)&tmpl->ct_general, IP_CT_NEW);
+    }
+    else {
+        skb->_nfct = 0;
+    }
+    return 0; /* not fast nat */
+}
+
+/* Runs at the POST_ROUTING hook; populates the fast link and adds it to the hash table */
+unsigned int napt_handle6_fw(void *priv,
+                             struct sk_buff *skb,
+                             const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    enum ip_conntrack_dir dir;
+    struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+    struct neighbour *_neighbour = NULL;
+    struct net_device *out = state->out;
+
+    /* global fast-forwarding switch */
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    /* per-feature fast-forwarding bitmap switch */
+    if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch)
+            || !test_bit(FAST_TYPE_FW6_BIT, &fast_switch))
+    {
+        return NF_ACCEPT;
+    }
+
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+
+    /* Only TCP and UDP support the fast path. An ICMP packet (e.g. port
+     * unreachable) can look up a TCP/UDP ct, so check the protocol explicitly. */
+    if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP && ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+        return NF_ACCEPT;
+
+    /* no fast link for multicast */
+    if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    /* check whether there is a next hop */
+    if(!dst)
+    {
+        return NF_ACCEPT;
+    }
+
+    _neighbour = dst_neigh_lookup_skb(dst, skb);
+    if(!_neighbour)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+    {
+        goto accept;
+    }
+
+    /* check whether the packet length exceeds the egress device's MTU */
+    if (dst->dev && (skb->len > dst->dev->mtu))
+    {
+        goto accept;
+    }
+    if (!(ct = nf_ct_get(skb, &ctinfo)))
+    {
+        goto accept;
+    }
+
+    protocol = nf_ct_protonum(ct);
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+    //¹ýÂ˲»ÐèÒª¾­¹ýfastnatµÄЭÒé°ü
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+
+    /* fastnat applies to TCP/UDP only */
+    if (IPPROTO_TCP == protocol)
+    {
+        /* TCP three-way handshake completed */
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+            goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    spin_lock_bh(&fast_fw_spinlock);
+    if (ct->fast_ct.fast_dst[dir] && (ct->fast_ct.fast_dst[dir] != dst))
+    {
+        fast_fw_conn_release(ct);
+    }
+
+    if (!ct->fast_ct.fast_dst[dir])
+    {
+        rcu_assign_pointer(ct->fast_ct.fast_dst[dir], dst);
+        ct->fast_ct.fast_brport[dir] = getBridgePort(_neighbour, out);
+        fast_dst_add_ct(dst, ct);
+    }
+
+    ct->fast_ct.isFast = FAST_CT_FW6;
+    spin_unlock_bh(&fast_fw_spinlock);
+accept:
+
+    neigh_release(_neighbour);
+    return NF_ACCEPT;
+}
+
+static struct nf_hook_ops fast6_fw_hook = {
+    .hook = napt_handle6_fw,
+    //.owner = THIS_MODULE,
+    .pf = PF_INET6,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP6_PRI_LAST,
+};
+
+
+int fast6_fw_init(void)
+{
+    int ret = 0;
+
+    ret = nf_register_net_hook(&init_net, &fast6_fw_hook);
+    if (ret != 0)
+    {
+        print_sun(SUN_ERR,"init fast6_fw_init failed\n");
+        return -EINVAL;
+    }
+    print_sun(SUN_DBG,"init fast6_fw_init done\n");
+
+    return 0;
+}
+
+int fast6_fw_cleanup(void)
+{
+    fast_release_all(RELEASE_ALL_DST);
+    nf_unregister_net_hook(&init_net, &fast6_fw_hook);
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast_common.c b/upstream/linux-5.10/net/core/fastproc/fast_common.c
new file mode 100755
index 0000000..69f3761
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast_common.c
@@ -0,0 +1,2113 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+
+MODULE_LICENSE("GPL");
+
+/* ************************** variables shared by fast forwarding ************************** */
+struct kmem_cache *fast_head_cache;
+
+spinlock_t fast_fw_spinlock;             /* spinlock protecting forwarding data */
+spinlock_t fastlocal_spinlock;           /* spinlock protecting local-delivery data */
+
+/*
+* 0: fastnat off, standard Linux path; existing links may be torn down and rebuilt
+* 1: standard IP-layer fastnat, which relies on softirq scheduling; slightly lower performance, for scenarios such as traffic shaping
+* 2: driver-to-driver forwarding at the net_device layer, no softirq
+* 5: fastnat off, standard Linux path; existing links are kept
+* The value can be changed via proc
+*/
+int fastnat_level = FAST_NET_DEVICE;/*FAST_NET_DEVICE; modify by zdd, close fastnat*/
+
+/* bitmap --- per-feature fast-forwarding switches; see the definitions in fast_common.h */
+
+//unsigned long fast_switch = 0x67;
+unsigned long fast_switch = 0x0;
+
+/* ************************ inter-subnet fast-forwarding variables ************************ */
+int fastbr_level = 1;                 /* whether the fastbr feature is enabled */
+
+/* **************************** platform-related variables **************************** */
+/*
+ * The string arrays below (br_name and friends) are configured through proc
+ * nodes; here they are only defined and given their initial values.
+ */
+char br_name[MAX_NET_DEVICE_NAME_LEN + 1] = "br0";
+char ps_name[MAX_NET_DEVICE_NAME_LEN + 1] = "wan1";
+char usb_name[MAX_NET_DEVICE_NAME_LEN + 1] = "usblan0";
+char ppp_name[MAX_NET_DEVICE_NAME_LEN + 1] = "ppp";
+/* cp: single core; in module form the routing must be customized */
+/* ap: dual core; in module form simple bridging is enough */
+char need_jilian[MAX_NET_DEVICE_NAME_LEN + 1] = "0";
+
+/* The three priority settings below may only be tuned by the core networking team; careless changes severely hurt throughput in every direction */
+int  fast_br_level = 1;
+int  fast_fwd_level = 2;
+int  fast_local_level = 0;
+unsigned char zeromac[ETH_ALEN] = "";
+
+/* Kernel link-state variables and structs, copied from the kernel; refresh them when porting to a new kernel */
+#define sNO TCP_CONNTRACK_NONE
+#define sSS TCP_CONNTRACK_SYN_SENT
+#define sSR TCP_CONNTRACK_SYN_RECV
+#define sES TCP_CONNTRACK_ESTABLISHED
+#define sFW TCP_CONNTRACK_FIN_WAIT
+#define sCW TCP_CONNTRACK_CLOSE_WAIT
+#define sLA TCP_CONNTRACK_LAST_ACK
+#define sTW TCP_CONNTRACK_TIME_WAIT
+#define sCL TCP_CONNTRACK_CLOSE
+#define sS2 TCP_CONNTRACK_SYN_SENT2
+#define sIV TCP_CONNTRACK_MAX
+#define sIG TCP_CONNTRACK_IGNORE
+
+/* What TCP flags are set from RST/SYN/FIN/ACK. */
+enum tcp_bit_set {
+    TCP_SYN_SET,
+    TCP_SYNACK_SET,
+    TCP_FIN_SET,
+    TCP_ACK_SET,
+    TCP_RST_SET,
+    TCP_NONE_SET,
+};
+
+/* from nf_conntrack_proto_tcp.c */
+static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
+    {
+        /* ORIGINAL */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*syn*/       { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
+        /*
+         *    sNO -> sSS    Initialize a new connection
+         *    sSS -> sSS    Retransmitted SYN
+         *    sS2 -> sS2    Late retransmitted SYN
+         *    sSR -> sIG
+         *    sES -> sIG    Error: SYNs in window outside the SYN_SENT state
+         *            are errors. Receiver will reply with RST
+         *            and close the connection.
+         *            Or we are not in sync and hold a dead connection.
+         *    sFW -> sIG
+         *    sCW -> sIG
+         *    sLA -> sIG
+         *    sTW -> sSS    Reopened connection (RFC 1122).
+         *    sCL -> sSS
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
+        /*
+         *    sNO -> sIV    Too late and no reason to do anything
+         *    sSS -> sIV    Client can't send SYN and then SYN/ACK
+         *    sS2 -> sSR    SYN/ACK sent to SYN2 in simultaneous open
+         *    sSR -> sSR    Late retransmitted SYN/ACK in simultaneous open
+         *    sES -> sIV    Invalid SYN/ACK packets sent by the client
+         *    sFW -> sIV
+         *    sCW -> sIV
+         *    sLA -> sIV
+         *    sTW -> sIV
+         *    sCL -> sIV
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+        /*
+         *    sNO -> sIV    Too late and no reason to do anything...
+         *    sSS -> sIV    Client migth not send FIN in this state:
+         *            we enforce waiting for a SYN/ACK reply first.
+         *    sS2 -> sIV
+         *    sSR -> sFW    Close started.
+         *    sES -> sFW
+         *    sFW -> sLA    FIN seen in both directions, waiting for
+         *            the last ACK.
+         *            Migth be a retransmitted FIN as well...
+         *    sCW -> sLA
+         *    sLA -> sLA    Retransmitted FIN. Remain in the same state.
+         *    sTW -> sTW
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*ack*/       { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
+        /*
+         *    sNO -> sES    Assumed.
+         *    sSS -> sIV    ACK is invalid: we haven't seen a SYN/ACK yet.
+         *    sS2 -> sIV
+         *    sSR -> sES    Established state is reached.
+         *    sES -> sES    :-)
+         *    sFW -> sCW    Normal close request answered by ACK.
+         *    sCW -> sCW
+         *    sLA -> sTW    Last ACK detected.
+         *    sTW -> sTW    Retransmitted last ACK. Remain in the same state.
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
+        /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+    },
+    {
+        /* REPLY */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*syn*/       { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 },
+        /*
+         *    sNO -> sIV    Never reached.
+         *    sSS -> sS2    Simultaneous open
+         *    sS2 -> sS2    Retransmitted simultaneous SYN
+         *    sSR -> sIV    Invalid SYN packets sent by the server
+         *    sES -> sIV
+         *    sFW -> sIV
+         *    sCW -> sIV
+         *    sLA -> sIV
+         *    sTW -> sIV    Reopened connection, but server may not do it.
+         *    sCL -> sIV
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+        /*
+         *    sSS -> sSR    Standard open.
+         *    sS2 -> sSR    Simultaneous open
+         *    sSR -> sIG    Retransmitted SYN/ACK, ignore it.
+         *    sES -> sIG    Late retransmitted SYN/ACK?
+         *    sFW -> sIG    Might be SYN/ACK answering ignored SYN
+         *    sCW -> sIG
+         *    sLA -> sIG
+         *    sTW -> sIG
+         *    sCL -> sIG
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+        /*
+         *    sSS -> sIV    Server might not send FIN in this state.
+         *    sS2 -> sIV
+         *    sSR -> sFW    Close started.
+         *    sES -> sFW
+         *    sFW -> sLA    FIN seen in both directions.
+         *    sCW -> sLA
+         *    sLA -> sLA    Retransmitted FIN.
+         *    sTW -> sTW
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*ack*/       { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
+        /*
+         *    sSS -> sIG    Might be a half-open connection.
+         *    sS2 -> sIG
+         *    sSR -> sSR    Might answer late resent SYN.
+         *    sES -> sES    :-)
+         *    sFW -> sCW    Normal close request answered by ACK.
+         *    sCW -> sCW
+         *    sLA -> sTW    Last ACK detected.
+         *    sTW -> sTW    Retransmitted last ACK.
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
+        /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+    }
+};
+
+#define SECS * HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS * 24 HOURS
+
+/* from nf_conntrack_proto_tcp.c */
+unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
+    [TCP_CONNTRACK_SYN_SENT]    = 2 MINS,
+    [TCP_CONNTRACK_SYN_RECV]    = 5 MINS, //60 SECS,
+    [TCP_CONNTRACK_ESTABLISHED]    = 2 HOURS, //5 DAYS
+    [TCP_CONNTRACK_FIN_WAIT]    = 2 MINS,
+    [TCP_CONNTRACK_CLOSE_WAIT]    = 60 SECS,
+    [TCP_CONNTRACK_LAST_ACK]    = 30 SECS,
+    [TCP_CONNTRACK_TIME_WAIT]    = 2 MINS,
+    [TCP_CONNTRACK_CLOSE]        = 120 SECS, /*normal is 10SEC*/
+    [TCP_CONNTRACK_SYN_SENT2]    = 2 MINS,
+    /* RFC1122 says the R2 limit should be at least 100 seconds.
+       Linux uses 15 packets as limit, which corresponds
+       to ~13-30min depending on RTO. */
+    //[TCP_CONNTRACK_MAX]    = 2 MINS,
+    //[TCP_CONNTRACK_IGNORE]    = 2 MINS,
+    [TCP_CONNTRACK_RETRANS]    = 5 MINS,
+    [TCP_CONNTRACK_UNACK]      = 5 MINS,
+};
+
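+/* A UDP flow that has seen reply traffic ("stream") ages out after 180 s;
+ * unidirectional UDP ages out after 120 s (see the users in fast6_recv()). */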
+unsigned int fast_udp_timeout_stream = 180*HZ;
+unsigned int fast_udp_timeout = 120*HZ; /*normal is 30*HZ*/
+
+/* protocol types that do not support fastnat */
+/* No longer used: replaced by values fed in dynamically through proc into
+ * nofast_proto. Kept for a while so developers can see the port numbers. */
+unsigned int nofast_port[NOFAST_PROTO_MAX] = {
+    21,      // FTP; sometimes also used by the File Service Protocol (FSP)
+    22,      // ssh Secure Shell (SSH) service
+    23,      // telnet Telnet service
+    25,      // smtp Simple Mail Transfer Protocol (SMTP)
+    53,      // domain Domain Name Service (e.g. BIND)
+    67,      // DHCP server port
+    68,      // DHCP client port
+    69,      // tftp Trivial File Transfer Protocol (TFTP)
+    110,     // Post Office Protocol version 3 (POP3)
+    115,     // sftp Secure File Transfer Protocol (SFTP) service
+    123,     // ntp Network Time Protocol (NTP)
+    443,     // https HTTP over TLS/SSL
+    500,     // isakmp Internet Security Association and Key Management Protocol (ISAKMP)
+    1352,    // Lotus Notes
+    1723,    // PPTP TCP
+    1990,    // stun-p1 cisco STUN Priority 1 port
+    1991,    // stun-p2 cisco STUN Priority 2 port
+    1992,    // stun-p3 cisco STUN Priority 3 port, ipsendmsg IPsendmsg
+    1993,    // snmp-tcp-port cisco SNMP TCP port
+    1994,    // stun-port cisco serial tunnel port TCP
+    1995,    // perf-port cisco perf port TCP
+    1996,    // tr-rsrb-port cisco Remote SRB port TCP
+    1997,    // gdp-port Cisco Gateway Discovery Protocol (GDP)
+    1998,    // x25-svc-port cisco X.25 service
+    4500,    // NAT-T UDP
+    5060     // 5060/udp: Session Initiation Protocol (SIP)
+};
+
+/* ******************************* function declarations ******************************* */
+int (*fast_nat4_proc)(struct sk_buff *skb);
+int (*fast_nat6_proc)(struct sk_buff *skb);
+int (*fast_fw4_proc)(struct nf_conn *tmpl,
+                     struct sk_buff *skb,
+                     struct nf_conn *ct,
+                     struct nf_conntrack_l4proto *l4proto,
+                     unsigned int dataoff,
+                     int dir,
+                     u_int8_t protonum);
+int (*fast_fw6_proc)(struct nf_conn *tmpl,
+                     struct sk_buff *skb,
+                     struct nf_conn *ct,
+                     struct nf_conntrack_l4proto *l4proto,
+                     unsigned int dataoff,
+                     int dir,
+                     u_int8_t protonum);
+
+int (*fast_local4_proc)(struct nf_conn *tmpl,
+                        struct sk_buff *skb,
+                        struct nf_conn *ct,
+                        struct nf_conntrack_l4proto *l4proto,
+                        unsigned int dataoff,
+                        int dir,
+                        u_int8_t protonum);
+int (*fast_local6_proc)(struct nf_conn *tmpl,
+                        struct sk_buff *skb,
+                        struct nf_conn *ct,
+                        struct nf_conntrack_l4proto *l4proto,
+                        unsigned int dataoff,
+                        int dir,
+                        u_int8_t protonum);
+int (*fast_local4_output_proc)(struct sk_buff *skb);
+int (*fast_local6_output_proc)(struct sk_buff *skb);
+
+int (*fast_br_proc)(struct sk_buff *skb);
+
+extern int fast_nat_recv(struct sk_buff *skb);
+extern int fast6_recv(struct sk_buff *skb);
+
+unsigned long iphdr_err_num =0;
+unsigned long ip6hdr_err_num =0;
+unsigned long tcphdr_err_num =0;
+unsigned long tcp6hdr_err_num =0;
+
+extern int fast4_fw_recv(struct nf_conn *tmpl,
+                         struct sk_buff *skb,
+                         struct nf_conn *ct,
+                         struct nf_conntrack_l4proto *l4proto,
+                         unsigned int dataoff,
+                         int dir,
+                         u_int8_t protonum);
+extern int fast6_fw_recv(struct nf_conn *tmpl,
+                         struct sk_buff *skb,
+                         struct nf_conn *ct,
+                         struct nf_conntrack_l4proto *l4proto,
+                         unsigned int dataoff,
+                         int dir,
+                         u_int8_t protonum);
+
+//extern int fast_br(struct sk_buff *skb);
+//extern struct net_device *getbrport_bydst(struct net_device *dev,unsigned char *dest);
+extern struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+                      const struct nf_conntrack_tuple *tuple);
+
+
+extern int (*fast_from_softirq) (struct sk_buff *skb);
+extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
+
+extern void fastnat_cleanup_links(void);
+extern void fast6_cleanup_links(void);
+
+extern fast_entry_t *cur_timeout_entry;
+extern int tcpack_timeout(fast_entry_t *entry, unsigned long *next_schedule, int *set_next);
+extern int tcpack_rel(fast_entry_t *entry);
+
+extern int tsp_fastnat_init(void);
+extern int tsp_fastnat_cleanup(void);
+
+extern int fast4_fw_init(void);
+extern int fast6_fw_init(void);
+
+extern int fast4_fw_cleanup(void);
+extern int fast6_fw_cleanup(void);
+
+extern int tsp_fast6_init(void);
+extern int tsp_fast6_cleanup(void);
+
+extern int fastnat_event(traverse_command_t *cmd);
+extern int fast6_event(traverse_command_t *cmd);
+
+/* init of the fast-forwarding proc files */
+extern int fast_conntrack_init_proc(void );
+
+/* init of the kernel platform proc files */
+extern int net_adapter_init_proc(void );
+
+unsigned int (*tsp_mirror_handle)(struct sk_buff *skb);
+
+extern void net_dbg_perf_dev_recv(char * packet_addr,char* node_str);
+extern void net_dbg_perf_clear_last_item(struct sk_buff *skb);
+
+
+/* ******************************* Function implementations ******************************* */
+static int fast_iphdr_check(struct sk_buff *skb, int proto)
+{
+    const struct iphdr *iph;
+    const struct ipv6hdr *ip6h;
+    u32 len;
+
+    if (proto == ETH_P_IP)
+    {
+        iph = ip_hdr(skb);
+
+        if (iph->ihl < 5 || iph->version != 4)
+            return 0;
+
+        len = ntohs(iph->tot_len);
+        if (skb->len < len) {
+            return 0;
+        }
+        if (len < (iph->ihl*4))
+            return 0;
+    }
+    else if(proto == ETH_P_IPV6)
+    {
+        ip6h = ipv6_hdr(skb);
+        if (ip6h->version != 6)
+            return 0;
+
+        len = ntohs(ip6h->payload_len);
+        if (len || ip6h->nexthdr != NEXTHDR_HOP) {
+            if (len + sizeof(struct ipv6hdr) > skb->len) {
+                return 0;
+            }
+        }
+
+    }
+
+    return 1;
+}
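+/* Note on the IPv6 branch above: a zero payload_len combined with a
+ * hop-by-hop next header may indicate a jumbogram (RFC 2675), so the
+ * length check is skipped in that case, mirroring ipv6_rcv(). */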
+
+/*
+ * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
+ *
+ * This function parses (probably truncated) exthdr set "hdr"
+ * of length "len". "nexthdrp" initially points to some place,
+ * where type of the first header can be found.
+ *
+ * It skips all well-known exthdrs, and returns pointer to the start
+ * of unparsable area i.e. the first header with unknown type.
+ * if success, *nexthdr is updated by type/protocol of this header.
+ *
+ * NOTES: - it may return pointer pointing beyond end of packet,
+ *          if the last recognized header is truncated in the middle.
+ *        - if packet is truncated, so that all parsed headers are skipped,
+ *          it returns -1.
+ *        - if packet is fragmented, return pointer of the fragment header.
+ *        - ESP is unparsable for now and considered like
+ *          normal payload protocol.
+ *        - Note also special handling of AUTH header. Thanks to IPsec wizards.
+ */
+
+static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+                                  u8 *nexthdrp, int len)
+{
+    u8 nexthdr = *nexthdrp;
+
+    while (ipv6_ext_hdr(nexthdr)) {
+        struct ipv6_opt_hdr hdr;
+        int hdrlen;
+
+        if (len < (int)sizeof(struct ipv6_opt_hdr))
+            return -1;
+        if (nexthdr == NEXTHDR_NONE)
+            break;
+        if (nexthdr == NEXTHDR_FRAGMENT)
+            break;
+        if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+            BUG();
+        if (nexthdr == NEXTHDR_AUTH)
+            hdrlen = (hdr.hdrlen+2)<<2;
+        else
+            hdrlen = ipv6_optlen(&hdr);
+
+        nexthdr = hdr.nexthdr;
+        len -= hdrlen;
+        start += hdrlen;
+    }
+
+    *nexthdrp = nexthdr;
+    return start;
+}
+
+
+static int fast_tcphdr_check(struct sk_buff *skb, int proto)
+{
+    const struct iphdr *iph = NULL;
+    const struct ipv6hdr *ip6h = NULL;
+    struct tcphdr *tcph = NULL;
+    unsigned int iphdr_len = 0;
+    unsigned int ip6hdr_len = 0;
+    unsigned int tcphdr_len = 0;
+    unsigned char *l4head = NULL;
+    __u8 protonum;
+    int extoff = 0;
+
+
+
+    if (proto == ETH_P_IP)
+    {
+        iph = ip_hdr(skb);
+        iphdr_len = iph->ihl * 4;
+        tcph = (struct tcphdr *)((unsigned char*)iph + iphdr_len);
+        tcphdr_len = sizeof(struct tcphdr);
+
+        if (tcphdr_len > skb->len - iphdr_len)
+            return 0;
+
+        // check that doff is consistent with the minimal TCP header length
+        if (tcph->doff < tcphdr_len/4)
+            return 0;
+
+        if (tcph->doff*4 > skb->len - iphdr_len)
+            return 0;
+    }
+    else if(proto == ETH_P_IPV6)
+    {
+        ip6h = ipv6_hdr(skb);
+        ip6hdr_len = sizeof(struct ipv6hdr);
+        tcphdr_len = sizeof(struct tcphdr);
+
+        // based on ipv6_get_l4proto(): extract the layer-4 protocol number
+        extoff = skb_network_offset(skb) + ip6hdr_len;
+        protonum = 0;
+        if (skb_copy_bits(skb, skb_network_offset(skb) + offsetof(struct ipv6hdr, nexthdr),
+                          &protonum, sizeof(protonum)) != 0) {
+            return 0;
+        }
+        extoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &protonum, skb->len - extoff);
+        if (extoff < 0)
+            return 0;
+
+        if(protonum != NEXTHDR_TCP)
+            return 1;
+
+        tcph = (struct tcphdr *)((unsigned char*)ip6h + extoff);
+        if (tcphdr_len > skb->len - extoff)
+            return 0;
+
+        // check that doff is consistent with the minimal TCP header length
+        if (tcph->doff < tcphdr_len/4)
+            return 0;
+
+        if (tcph->doff*4 > skb->len - extoff)
+            return 0;
+    }
+
+
+    return 1;
+}
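+/* Return semantics of the checks above: 1 means "acceptable for the fast
+ * path" (including IPv6 payloads that turn out not to be TCP), 0 means the
+ * header is malformed or truncated. */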
+static inline int deliver_skb(struct sk_buff *skb,
+                              struct packet_type *pt_prev,
+                              struct net_device *orig_dev)
+{
+    refcount_inc(&skb->users);
+    return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+void *get_ct_for_ap(struct sk_buff *skb)
+{
+	if(skb){
+		enum ip_conntrack_info ctinfo;
+		struct nf_conn * ct = nf_ct_get(skb, &ctinfo);
+		if(ct){
+			nf_conntrack_get(&ct->ct_general);
+			nf_conntrack_get(&ct->ct_general);
+			return &ct->ct_general;
+		}
+	}
+	return NULL;
+}
+
+void put_ct_for_ap(void *pct)
+{
+	struct nf_conn *ct = (struct nf_conn *)pct;
+	nf_conntrack_put((struct nf_conntrack *)ct);
+	nf_conntrack_put((struct nf_conntrack *)ct);
+}
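+/* Note: get_ct_for_ap() deliberately takes two conntrack references and
+ * put_ct_for_ap() drops two; presumably one guards the handle exported to
+ * the AP side and one covers its use there. The calls must stay paired. */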
+
+// Deliver the skb to RAW_PACKET (ptype_all) sockets here, so captures happen before the fast path modifies the packet; by the time __netif_receive_skb() would capture it, the contents have already been changed
+void fast_tcpdump(struct sk_buff *skb)
+{
+    struct packet_type *ptype = NULL;
+
+    rcu_read_lock();
+    list_for_each_entry_rcu(ptype, &ptype_all, list)
+    {
+        if (!ptype->dev || ptype->dev == skb->dev)
+        {
+            skbinfo_add(NULL, SKB_IRQ_FREE);
+            deliver_skb(skb, ptype, skb->dev);
+        }
+    }
+    rcu_read_unlock();
+}
+
+// Based on ip_finish_output2(): expand the skb headroom
+struct sk_buff *fast_expand_headroom(struct sk_buff *skb, struct net_device *dev) {
+    unsigned int hh_len = LL_RESERVED_SPACE(dev);
+    struct sk_buff *skb2 = NULL;
+    if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+        skb2 = skb_realloc_headroom(skb, max(hh_len, NET_SKB_PAD));
+        if(skb2)
+            clean_cache(skb2->data,skb2->len);
+        kfree_skb(skb);
+        return skb2;
+    }
+    return skb;
+}
+
+// Based on ip6_xmit(): expand the skb headroom
+struct sk_buff *fast_expand_headroom_v6(struct sk_buff *skb, struct net_device *dev) {
+    unsigned int hh_len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr);
+    struct sk_buff *skb2 = NULL;
+    if (unlikely(skb_headroom(skb) < hh_len)) {
+        skb2 = skb_realloc_headroom(skb, max(hh_len, NET_SKB_PAD));
+        if(skb2)
+            clean_cache(skb2->data,skb2->len);
+        kfree_skb(skb);
+        return skb2;
+    }
+    return skb;
+}
+
+
+/* Delete one entry from the list */
+fast_entry_t *fn_list_del(fast_list_t *list_head, fast_entry_t *entry)
+{
+    fast_entry_t *ret_entry = NULL, **pprev = NULL;
+
+    if (!entry)
+    {
+        return NULL;
+    }
+
+    pprev = &list_head->next;
+    for (ret_entry = list_head->next; ret_entry; ret_entry = ret_entry->next)
+    {
+        if (ret_entry == entry)
+        {
+            *pprev = ret_entry->next;
+            list_head->count--;
+            break;
+        }
+        pprev = &ret_entry->next;
+    }
+    kmem_cache_free(fast_head_cache, entry);
+    netslab_dec(FAST_SLAB);
+    return NULL;
+}
+
+// Add a node at the head of the list
+void fn_list_add(fast_list_t *list_head, fast_entry_t *entry)
+{
+    entry->next = list_head->next;
+    list_head->next = entry;
+    list_head->count++;
+}
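+/* Usage sketch (illustrative): the list is a singly linked LIFO, so
+ * insertion is O(1) while fn_list_del() walks the list in O(n):
+ *
+ *     fast_entry_t *e = kmem_cache_alloc(fast_head_cache, GFP_ATOMIC);
+ *     fn_list_add(&working_list, e);    // push at the head
+ *     fn_list_del(&working_list, e);    // unlink (if present) and free
+ */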
+
+// Look up entry data by tuple
+fast_entry_data_t *fast_find_entry_data(const struct hlist_nulls_head *working_hash, const struct nf_conntrack_tuple *tuple)
+{
+    struct nf_conntrack_tuple_hash *h;
+    struct hlist_nulls_node *n;
+    unsigned int hash;
+
+    hash = hash_conntrack_fast(tuple);
+    hlist_nulls_for_each_entry_rcu(h, n, &working_hash[hash], hnnode)
+    {
+        if (nf_ct_tuple_equal(tuple, &h->tuple))
+        {
+            return fast_hash_to_data(h);
+        }
+    }
+
+    return NULL;
+}
+
+// Add a node to the working hash
+int fast_add_entry(struct hlist_nulls_head *working_hash, fast_entry_data_t *entry_data)
+{
+    unsigned int hash;
+
+    hash = hash_conntrack_fast(&entry_data->tuplehash.tuple);
+    if (fast_find_entry_data(working_hash, &entry_data->tuplehash.tuple))
+    {
+        return 0;
+    }
+
+    hlist_nulls_add_head_rcu(&entry_data->tuplehash.hnnode, &working_hash[hash]);
+    // dev is already held in the current flow, so omitting rcu_read_lock() protection here is acceptable
+    dev_hold(entry_data->outdev);
+
+    return 0;
+}
+
+static void workinghash_del_node(fast_entry_t *entry)
+{
+    int i = 0;
+
+    for (i = 0; i < IP_CT_DIR_MAX; i++)
+    {
+        //if (entry->flags & (1 << i))
+        if ((entry->flags & (1 << i))&&(0 != entry->data[i].tuplehash.hnnode.next))
+        {
+            hlist_nulls_del_rcu(&entry->data[i].tuplehash.hnnode);
+            //hlist_nulls_del(&entry->data[i].tuplehash.hnnode);
+            dev_put(entry->data[i].outdev);
+        }
+    }
+}
+
+/* Delete the entry */
+static void fastlist_del_entry(fast_list_t *list_head, fast_entry_t *entry)
+{
+    tcpack_rel(entry);
+    //nf_ct_put(entry->ct);
+    atomic_dec(&(entry->ct->ct_general.use));
+    fn_list_del(list_head, entry);
+}
+
+/* fast timeout handling: delete the entry */
+/* jiangjing: changed the parameter type to unsigned long */
+extern spinlock_t fast6_spinlock;
+extern spinlock_t fastnat_spinlock;
+static void fast_timeout(struct timer_list *ptimer)
+{
+    fast_entry_t *entry = (fast_entry_t *)(ptimer->data);
+    fast_entry_t *ret_entry = NULL;
+    struct fast_list_s *list_head = entry->list_head;
+    spinlock_t *fast_spinlock = entry->fast_spinlock;
+    if(fast_spinlock == &fast6_spinlock || fast_spinlock == &fastnat_spinlock)
+    {
+        spin_lock_bh(fast_spinlock);
+
+        for (ret_entry = list_head->next; ret_entry; ret_entry = ret_entry->next)
+        {
+            if (ret_entry == entry)
+            {
+                workinghash_del_node(entry);
+                fastlist_del_entry(entry->list_head, entry);
+                spin_unlock_bh(fast_spinlock);
+                return;
+            }
+        }
+        spin_unlock_bh(fast_spinlock);
+    }
+}
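+/* The timer handler above re-checks that the entry is still on its list
+ * while holding the owning spinlock before freeing it, since a concurrent
+ * flush (e.g. fast_cleanup_links) may already have unlinked it. */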
+
+// Look up the node for this ct; create it if not found
+fast_entry_t *fast_get_entry(fast_list_t *list_head, struct nf_conn *ct, char dir)
+{
+    fast_entry_t *ret = NULL;
+    u_int8_t protocol;
+    unsigned long expires;
+
+    for (ret = list_head->next; ret; ret = ret->next)
+    {
+        if (ret->ct == ct)
+        {
+            protocol = nf_ct_protonum(ct);
+            if (IPPROTO_TCP == protocol)
+            {
+                /*tcp*/
+                expires = jiffies + tcp_timeouts[ct->proto.tcp.state];
+            }
+            else
+            {
+                /*udp*/
+                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+                {
+                    expires = jiffies + fast_udp_timeout_stream;
+                }
+                else
+                {
+                    expires = jiffies + fast_udp_timeout;
+                }
+
+
+            }
+            mod_timer(&ret->timeout, expires);
+            return ret;
+        }
+    }
+
+    /* Only create entries for the ORIGINAL direction */
+    if (IP_CT_DIR_ORIGINAL != dir)
+    {
+        return NULL;
+    }
+
+    // both kmalloc and kmem_cache_alloc are slab-based; a dedicated slab cache is used here instead of the generic kmalloc one
+    ret = kmem_cache_alloc(fast_head_cache, GFP_ATOMIC);
+    if (ret == NULL)
+    {
+        print_sun(SUN_ERR,"fast_get_entry: kmem_cache_alloc fail!\n");
+        return NULL;
+    }
+    netslab_inc(FAST_SLAB);
+    memset(ret, 0, sizeof(fast_entry_t));
+    ret->ct = ct;
+    ret->list_head = list_head;
+
+    //ÉèÖö¨Ê±Æ÷
+    __init_timer(&ret->timeout, NULL, 0);
+
+    protocol = nf_ct_protonum(ct);
+    if (IPPROTO_TCP == protocol)
+    {
+        /*tcp*/
+        ret->timeout.expires = jiffies + tcp_timeouts[ct->proto.tcp.state];
+    }
+    else
+    {
+        /*udp*/
+        if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+        {
+            ret->timeout.expires = jiffies + fast_udp_timeout_stream;
+        }
+        else
+        {
+            ret->timeout.expires = jiffies + fast_udp_timeout;
+        }
+    }
+
+    ret->timeout.data = (unsigned long)ret;
+    ret->timeout.function = fast_timeout;
+    add_timer(&ret->timeout);
+
+    fn_list_add(list_head, ret);
+
+    return ret;
+}
+
+unsigned int get_conntrack_index(const struct tcphdr *tcph)
+{
+    if (tcph->rst) return TCP_RST_SET;
+    else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
+    else if (tcph->fin) return TCP_FIN_SET;
+    else if (tcph->ack) return TCP_ACK_SET;
+    else return TCP_NONE_SET;
+}
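+/* For example, a SYN|ACK segment maps to TCP_SYNACK_SET and a pure ACK to
+ * TCP_ACK_SET; RST takes precedence over every other flag. */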
+
+/* Update the TCP timeout timer */
+void update_tcp_timeout(fast_entry_t *entry, fast_entry_data_t *entry_data, struct tcphdr *tcph)
+{
+    enum tcp_conntrack new_state, old_state;
+    unsigned int dir, index;
+
+    old_state = entry->ct->proto.tcp.state;
+    dir = entry_data->tuplehash.tuple.dst.dir;
+
+    if (tcph == NULL || old_state >= TCP_CONNTRACK_MAX)
+    {
+        print_sun(SUN_ERR,"update_tcp_timeout: tcph is null or state out of range!\n");
+        return;
+    }
+    index = get_conntrack_index(tcph);
+
+    /* Update the TCP connection state */
+    new_state = tcp_conntracks[dir][index][old_state];
+    if(old_state != new_state)
+    {
+        // based on tcp_packet() in nf_conntrack_proto_tcp.c
+        if (new_state == TCP_CONNTRACK_IGNORE)
+            new_state = TCP_CONNTRACK_SYN_RECV;
+        else if (new_state == TCP_CONNTRACK_MAX)
+            return;
+
+        entry->ct->proto.tcp.state = new_state;
+        // rearm the timeout only when the lifetime changes, to reduce list traversals
+        mod_timer(&entry->timeout, jiffies + tcp_timeouts[new_state]);
+    }
+}
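+/* Illustrative walk-through: tcp_conntracks[dir][index][old_state] is the
+ * standard conntrack TCP state table, so e.g. an ACK observed while the
+ * connection is in SYN_RECV typically advances it to ESTABLISHED, and the
+ * entry timer is then rearmed with the ESTABLISHED timeout. */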
+
+/* Record the corresponding MAC header info; returns 1 on success, 0 otherwise */
+int record_MAC_header(const struct hlist_nulls_head *working_hash, struct nf_conn *ct,
+                      fast_entry_t *entry, fast_entry_data_t *entry_data,
+                      struct neighbour *neigh, const struct net_device *out, int proto)
+{
+    struct ethhdr *eth;
+    struct net_device *dst_out = NULL;
+    int i;
+
+    if (out == NULL)
+        goto REL;
+
+    // pre-fill the MAC header only for Ethernet-type interfaces; PPP, PPPoE and similar headers are variable and cannot be pre-filled; see alloc_netdev() and alloc_etherdev()
+    if (out->type != ARPHRD_ETHER)
+        return 1;
+
+    // when the egress is a bridge (br), fetch the underlying L2 device directly, e.g. usb0 or wifi0
+    if (out->priv_flags & IFF_EBRIDGE)
+    {
+        if(out->dev_addr == NULL)
+            goto REL;
+
+        if (fastbr_level == 1)
+        {
+            dst_out = getbrport_bydst(out, neigh->ha);
+            if (dst_out == NULL)
+            {
+                print_sun(SUN_DBG,"!!!!! getbrport_bydst fail \n");
+                goto REL;
+            }
+            entry_data->outdev = dst_out;
+        }
+        else
+        {
+            entry_data->outdev = out;
+        }
+        entry_data->hh_flag = 1;
+        eth = (struct ethhdr *)entry_data->hh_data;
+        eth->h_proto = proto;
+        memcpy(eth->h_source, out->dev_addr, ETH_ALEN);
+        memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+    }
+    // PPP forwarding: only the bare IP packet is sent
+    else if (strncmp(out->name, ppp_name, strlen(ppp_name)) == 0)
+    {
+        if(out->dev_addr == NULL)
+            goto REL;
+
+        entry_data->outdev = out;
+        entry_data->hh_flag = 0;
+    }
+    // ordinary Ethernet forwarding
+    else
+    {
+        if(out->dev_addr == NULL)
+            goto REL;
+
+        // for wifi station/RJ45/USB and the like, the MAC header must be filled in
+        entry_data->outdev = out;
+        entry_data->hh_flag = 1;
+        eth = (struct ethhdr *)entry_data->hh_data;
+        eth->h_proto = proto;
+        memcpy(eth->h_source, out->dev_addr, ETH_ALEN);
+        memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+    }
+    return 1;
+
+REL:
+    // the entry may already have been created; release all of its resources
+    for (i = 0; i < IP_CT_DIR_MAX; i++)
+    {
+        if (entry->flags & (1 << i))
+        {
+            hlist_nulls_del(&entry->data[i].tuplehash.hnnode);
+            dev_put(entry->data[i].outdev);
+        }
+    }
+    // restore the ct timeout
+    //add_timer(&ct->timeout);
+    //nf_ct_put(ct);
+    atomic_dec(&(ct->ct_general.use));
+    del_timer(&entry->timeout);
+    fn_list_del(entry->list_head, entry);
+    return 0;
+}
+
+/* Look up the bridge port by destination MAC on the given bridge; return the port if found, otherwise NULL */
+struct net_device *getBridgePort(struct neighbour *neigh, const struct net_device *out)
+{
+    struct net_device *dst_out = NULL;
+
+    if (!test_bit(FAST_TYPE_BR_LOCAL_BIT, &fast_switch))
+        return NULL;
+
+    if (!out || !neigh)
+        return NULL;
+
+    // pre-fill the MAC header only for Ethernet-type interfaces
+    if (out->type != ARPHRD_ETHER)
+        return NULL;
+
+    // when the egress is a bridge, fetch the L2 bridge-port device directly
+    if (out->priv_flags & IFF_EBRIDGE)
+    {
+        if (out->dev_addr == NULL)
+            return NULL;
+
+        // look up the bridge port
+        dst_out = getbrport_bydst(out, neigh->ha);
+        if (dst_out && dst_out != out)
+            return dst_out;
+
+        print_sun(SUN_DBG, "!!!!! getbrport_bydst fail \n");
+    }
+    return NULL;
+}
+
+//Åжϲ»ÐèÒª¾­¹ýfastnatµÄЭÒé°ü
+int check_skip_ports(unsigned int net_dst_port)
+{
+    int i = 0;
+    unsigned int dst_port = ntohs(net_dst_port);
+
+    if (!dst_port)
+        return 0;
+
+    for (i = 0; i < sizeof(nofast_port)/sizeof(nofast_port[0]); i++)
+    {
+        if (dst_port == nofast_port[i])
+        {
+            return 1;
+        }
+    }
+    return 0;
+}
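+/* Usage sketch (illustrative; assumes nofast_port[] holds host-order ports
+ * that must stay on the slow path):
+ *
+ *     if (check_skip_ports(tcph->dest))   // tcph->dest is network order
+ *         return 0;                       // hand the packet to the normal stack
+ */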
+
+// Main list-traversal worker: implements timeout, device-event and other list operations
+void traverse_process(fast_list_t *list_head, unsigned long param)
+{
+    fast_entry_t *entry, *next;
+    traverse_command_t *cmd;
+    int i, need_del;
+
+    cmd = (traverse_command_t *)param;
+    if (!cmd)
+    {
+        return;
+    }
+
+    for(entry = list_head->next; entry; entry = next)
+    {
+        next = entry->next;
+        need_del = 0;
+
+        if (cmd->cmd == TRAVERSE_CMD_DEV_DOWN)
+        {
+            for (i = 0; i < IP_CT_DIR_MAX; i++)
+            {
+                if (entry->flags & (1 << i))
+                {
+                    const struct nf_conn_nat *nat = nfct_nat(entry->ct);
+                    if ((entry->data[i].outdev && entry->data[i].outdev->ifindex == cmd->arg)
+                            || (nat && nat->masq_index == cmd->arg))
+                    {
+                        need_del = FAST_ALL_DIR;
+                        break;
+                    }
+                }
+            }
+        }
+
+        if (need_del)
+        {
+            del_timer(&entry->timeout);
+            workinghash_del_node(entry);
+            if (need_del == FAST_ALL_DIR)
+            {
+                fastlist_del_entry(entry->list_head, entry);
+            }
+        }
+    }
+}
+
+// fastnat_level turned off: flush all IPv4/IPv6 fast-forwarding state but keep the original ct entries
+void fast_cleanup_links(fast_list_t *list_head)
+{
+    fast_entry_t *entry, *next;
+
+    for (entry = list_head->next; entry; entry = next)
+    {
+        next = entry->next;
+        // delete the entry's private timer
+        del_timer(&entry->timeout);
+
+        workinghash_del_node(entry);
+
+        // restore the ct timeout
+        //add_timer(&entry->ct->timeout);
+        fn_list_del(list_head, entry);
+
+    }
+}
+
+void athr_fast_dump(int ctl)
+{
+
+}
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt_fast = {
+    .id	= NF_CT_DEFAULT_ZONE_ID,
+    .dir	= NF_CT_DEFAULT_ZONE_DIR,
+};
+
+/* ***************** Unified fast-forwarding handlers ********************************/
+/* Look up a ct eligible for fast forwarding --- modelled on nf_conntrack_in() */
+struct nf_conn *skb_get_ct(struct nf_conn **tmpl,
+                           struct sk_buff *skb,
+                           struct nf_conntrack_l4proto **l4proto,
+                           unsigned int *dataoff,
+                           u_int8_t pf,
+                           unsigned int hooknum,
+                           int *dir,
+                           u_int8_t *protonum)
+{
+    struct nf_conntrack_tuple tuple;
+    struct nf_conntrack_tuple_hash *h;
+    struct nf_conn *ct;
+    struct nf_conntrack_zone * zone;
+    enum ip_conntrack_info ctinfo;
+    struct nf_conntrack_zone tmp;
+    u32 hash;
+
+
+    //ÒÔϲο¼nf_conntrack_inʵÏÖ²éѯÒÑÓÐct
+    *tmpl = nf_ct_get(skb, &ctinfo);
+    if (*tmpl || ctinfo == IP_CT_UNTRACKED) {
+        /* Previously seen (loopback or untracked)?  Ignore. */
+        if ((*tmpl && !nf_ct_is_template(*tmpl)) ||
+                ctinfo == IP_CT_UNTRACKED) {
+            goto err_out;
+        }
+        skb->_nfct = 0;
+    }
+
+    *dataoff = get_l4proto_fast(skb, skb_network_offset(skb), pf, protonum);
+
+    if (*dataoff <= 0) {
+        goto err_out;
+    }
+
+
+    *l4proto = nf_ct_l4proto_find(*protonum);
+
+
+    if (*protonum != IPPROTO_TCP && *protonum != IPPROTO_UDP)
+        goto err_out;
+
+    if (!nf_ct_get_tuple_fast(skb, skb_network_offset(skb),
+                              *dataoff, pf, *protonum, &init_net, &tuple)) {
+        goto err_out;
+    }
+
+
+    zone = nf_ct_zone_tmpl(*tmpl, skb, &tmp);
+    hash = hash_conntrack_raw_fast(&tuple, &init_net);
+
+    h = nf_conntrack_find_fast(&init_net, zone, &tuple, hash);
+
+    if (!h || IS_ERR(h))
+        goto err_out;
+
+    ct = nf_ct_tuplehash_to_ctrack(h);
+
+
+    if (!ct || IS_ERR(ct)) {
+        goto err_out;
+    }
+
+    if (test_bit(IPS_DYING_BIT, &ct->status) || test_bit(IPS_UNTRACKED_BIT, &ct->status))
+    {
+        nf_conntrack_put(&ct->ct_general);
+        goto err_out;
+    }
+
+    if (*tmpl && *tmpl == ct)
+    {
+        nf_conntrack_put(&ct->ct_general);
+        goto err_out;
+    }
+
+    // TCP traffic may only take the fast path once the connection is established in both directions
+    if (IPPROTO_TCP == *protonum && !test_bit(IPS_ASSURED_BIT, &ct->status))
+    {
+        nf_conntrack_put(&ct->ct_general);
+        goto err_out;
+    }
+
+    if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+        *dir = 1;
+    } else {
+        *dir = 0;
+    }
+    return ct;
+
+err_out :
+    print_sun(SUN_DBG, "skb : %p, skb_get_ct fail!!!!!!!!!!", skb);
+    if (*tmpl) {
+        nf_ct_set(skb, (struct nf_conn *)&((*tmpl)->ct_general), ctinfo);
+    }
+    else {
+        skb->_nfct = 0;
+    }
+    return NULL;
+}
+
+// New fast-mode packet handling
+int fast_for_ip_new(struct sk_buff *skb,
+                    int(*fast_fw)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
+                                  struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
+                    int(*fast_local_proc)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
+                            struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
+                    int proto)
+{
+    struct nf_conn *ct = NULL, *tmpl = NULL;
+    struct nf_conntrack_l4proto *l4proto;
+    unsigned int dataoff;
+    u_int8_t protonum;
+    int dir = 0;
+    int ret = 0;
+
+    if (proto == ETH_P_IP)
+        ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET, NF_INET_PRE_ROUTING, &dir, &protonum);
+    else if (proto == ETH_P_IPV6)
+        ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET6, NF_INET_PRE_ROUTING, &dir, &protonum);
+    if (!ct)
+    {
+        if (fast_br_proc && fast_br_proc(skb))
+        {
+            fastbr_num++;
+            return 1;
+        }
+        return 0;
+    }
+
+    spin_lock_bh(&fast_fw_spinlock);
+    /* TCP requires a completed three-way handshake and fast entries in both directions */
+    if (IPPROTO_TCP == protonum || NEXTHDR_TCP == protonum)
+    {
+        int rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+        if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
+            nf_conntrack_put(&ct->ct_general);
+            spin_unlock_bh(&fast_fw_spinlock);
+            return 0;
+        }
+        if (!(ct->fast_ct.fast_dst[dir] && ct->fast_ct.fast_dst[rdir])) {
+            nf_conntrack_put(&ct->ct_general);
+            spin_unlock_bh(&fast_fw_spinlock);
+            return 0;
+        }
+    }
+    switch(ct->fast_ct.isFast)
+    {
+    case FAST_CT_FW4:
+    case FAST_CT_FW6:
+        if (fast_fw && fast_fw(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
+            if (proto == ETH_P_IP)
+            {
+                fastnat_num++;
+            }
+
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            ret = 1;
+            break;
+        }
+        ret = 0;
+        break;
+    case FAST_CT_LOCAL4:
+    case FAST_CT_LOCAL6:
+        if (fast_local_proc && fast_local_proc(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
+            if (proto == ETH_P_IP)
+                fast_local4_rcv_num++;
+            else if (proto == ETH_P_IPV6)
+                fast_local6_rcv_num++;
+            ret = 1;
+            break;
+        }
+        ret = 0;
+        break;
+    default:
+        nf_conntrack_put(&ct->ct_general);
+        if (fast_br_proc && fast_br_proc(skb)) {
+            fastbr_num++;
+            ret = 1;
+            break;
+        }
+        ret = 0;
+        break;
+    }
+
+    spin_unlock_bh(&fast_fw_spinlock);
+    return ret;
+}
+
+// Legacy fast-mode packet handling
+int fast_for_ip(struct sk_buff *skb, int(*fast_fw)(struct sk_buff *),
+                int(* fast_local_proc)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
+                                       struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
+                int proto)
+{
+    struct nf_conn *ct = NULL, *tmpl = NULL;
+    struct nf_conntrack_l4proto *l4proto;
+    unsigned int dataoff;
+    u_int8_t protonum;
+    int dir = 0;
+    // processing order: forward, bridge, local
+    if (fast_fwd_level > fast_br_level && fast_br_level > fast_local_level)
+    {
+        if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+            {
+                fastnat_num++;
+            }
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+        else if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+    }
+    // processing order: forward, local, bridge (note: as written this branch still tries the bridge path, not the local path)
+    else if (fast_fwd_level > fast_local_level && fast_local_level > fast_br_level)
+    {
+        if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+        else if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+    }
+    //ÇŽӡ¢×ª·¢¡¢±¾µØË³Ðò´¦Àí
+    else if (fast_br_level > fast_fwd_level && fast_fwd_level > fast_local_level)
+    {
+        if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+        else if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+    }
+    //ÇŽӡ¢±¾µØ¡¢×ª·¢Ë³Ðò´¦Àí
+    else if(fast_br_level > fast_local_level && fast_local_level > fast_fwd_level)
+    {
+        if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+        else if (fast_local_proc)
+        {
+            if (proto == ETH_P_IP)
+                ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET, NF_INET_PRE_ROUTING, &dir, &protonum);
+            else if (proto == ETH_P_IPV6)
+                ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET6, NF_INET_PRE_ROUTING, &dir, &protonum);
+
+            if(!ct)
+                return 0;
+
+            if (fast_local_proc(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
+                if (proto == ETH_P_IP)
+                    fast_local4_rcv_num++;
+                else if (proto == ETH_P_IPV6)
+                    fast_local6_rcv_num++;
+                return 1;
+            }
+        }
+        else if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num ++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+    }
+    // processing order: local, forward, bridge
+    else if(fast_local_level > fast_fwd_level && fast_fwd_level > fast_br_level)
+    {
+        if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+        else if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+    }
+    // processing order: local, bridge, forward
+    else if(fast_local_level > fast_br_level && fast_br_level > fast_fwd_level)
+    {
+        if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+        else if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+    }
+    return 0;
+}
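+/* The fast_fwd_level / fast_br_level / fast_local_level globals form a
+ * strict priority ordering above; if none of the six strict orderings
+ * matches (e.g. two levels are equal), the function falls through and the
+ * packet takes the normal kernel path. */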
+
+int btrunk_fw = 0;
+module_param(btrunk_fw, int, 0644);
+//extern int fast_fwd_ip4addr_conflict(struct sk_buff *skb);
+//extern int fast_for_multicast(struct sk_buff *skb);
+/* There may be no MAC header (e.g. on PS interfaces), so skb->protocol must be recognized from the IP header */
+int fast_for_ipdata(struct sk_buff *skb)
+{
+
+    struct iphdr *iph;
+
+    if (skb->len > 1000)
+        skb_big_num++;
+    else if (skb->len < 100)
+        skb_small_num++;
+
+    if (skb->dev == NULL)
+        return 0;
+    if (skb->protocol == htons(ETH_P_IP)) //ipv4
+    {
+        skb_num4++;
+        skb_bytes4 += skb->len;
+
+        if(btrunk_fw && fast_fwd_ip4addr_conflict(skb) == 1)
+        {
+            return 1;
+        }
+        // multicast fast forwarding is not implemented yet; only statistics for now
+        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+        {
+            multicast_num4++;
+            if(btrunk_fw && fast_for_multicast(skb) == 1)
+            {
+                return 1;
+            }
+            return 0;
+        }
+        // broadcast is not fast-forwarded; statistics only
+        else if (ipv4_is_lbcast(ip_hdr(skb)->daddr)) {
+            broadcast_num4++;
+            return 0;
+        }
+
+        if (ip_is_fragment(ip_hdr(skb)))
+        {
+            skbinfo_add(NULL, SKB_FRAG);
+            return 0;
+        }
+
+        // only TCP and UDP are fast-forwarded
+        if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+        {
+            return 0;
+        }
+
+        if(!fast_iphdr_check(skb, ETH_P_IP))
+        {
+            iphdr_err_num++;
+            kfree_skb(skb);
+            return 1;
+        }
+
+        /* If the frame carries tail padding, trim it from the skb; see ip_rcv() */
+        skb_trim(skb, ntohs(ip_hdr(skb)->tot_len));
+
+        /* TCP header validation */
+        if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
+            if (!fast_tcphdr_check(skb, ETH_P_IP))	{
+                tcphdr_err_num++;
+                //kfree_skb(skb);
+                return 0;
+            }
+        }
+        if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+            return fast_for_ip(skb, fast_nat4_proc, fast_local4_proc, ETH_P_IP);
+        else
+            return fast_for_ip_new(skb, fast_fw4_proc, fast_local4_proc, ETH_P_IP);
+
+    }
+    else if (skb->protocol == htons(ETH_P_IPV6)) //ipv6
+    {
+        skb_num6++;
+        skb_bytes6 += skb->len;
+
+        // multicast fast forwarding is not implemented yet; for now simply report fast-path failure
+        if(ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+        {
+            multicast_num6++;
+            return 0;
+        }
+
+        if(!fast_iphdr_check(skb, ETH_P_IPV6))
+        {
+            ip6hdr_err_num++;
+            kfree_skb(skb);
+            return 1;
+        }
+        /* If the frame carries tail padding, trim it from the skb; see ipv6_rcv() */
+        skb_trim(skb, ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr));
+        /* TCP header validation */
+        if (!fast_tcphdr_check(skb, ETH_P_IPV6))	{
+            tcp6hdr_err_num++;
+            //kfree_skb(skb);
+            return 0;
+        }
+        if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+            return fast_for_ip(skb, fast_nat6_proc, fast_local6_proc, ETH_P_IPV6);
+        else
+            return fast_for_ip_new(skb, fast_fw6_proc, fast_local6_proc, ETH_P_IPV6);
+    }
+    else
+        skb_unknown++;
+    return 0;
+}
+
+/* Advance the skb data pointer to the IP header */
+static int set_skbdata_toip(struct sk_buff *skb)
+{
+    __be16 next_pro = skb->protocol;
+again:
+    if (next_pro == htons(ETH_P_IP) || next_pro == htons(ETH_P_IPV6))
+    {
+        skb_set_network_header(skb, 0);
+        skb_reset_mac_len(skb);
+        skb->protocol = next_pro;
+        return 1;
+    }
+    //vlan
+    else if (next_pro == cpu_to_be16(ETH_P_8021Q))
+    {
+        skb->isvlan = 1;
+        skb_pull(skb, VLAN_HLEN);
+        next_pro = *((__be16 *)(skb->data - 2));
+        goto again;
+    }
+
+    //pppoe
+    else if (next_pro == htons(ETH_P_PPP_SES))
+    {
+        if (*(skb->data + 6) == 0x00 && *(skb->data + 7) == 0x21)
+        {
+            next_pro = htons(ETH_P_IP);
+            __skb_pull(skb, PPPOE_HEADER_LEN);
+            goto again;
+        }
+        else if(*(skb->data+ 6) == 0x00 && *(skb->data + 7) == 0x57)
+        {
+            next_pro = htons(ETH_P_IPV6);
+            __skb_pull(skb, PPPOE_HEADER_LEN);
+            goto again;
+        }
+    }
+    return 0;
+}
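+/* The PPPoE offsets used above: bytes 0-5 of the PPPoE session header are
+ * version/type, code, session id and length, and bytes 6-7 carry the PPP
+ * protocol number, 0x0021 for IPv4 and 0x0057 for IPv6, hence the two
+ * comparisons before skipping PPPOE_HEADER_LEN bytes. */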
+
+/* Fast-path handling for packets seen in kernel dev.c: IP packets, PPP packets, etc. */
+static int try_fast_for_netcoredata(struct sk_buff *skb)
+{
+    __be16 old_pro = skb->protocol;
+    unsigned int old_len = skb->len;
+    unsigned char * old_data = skb->data;
+    __be16 old_netheader = skb->network_header;
+
+    // the IP header should be 4-byte aligned
+    //if (((unsigned long)skb->data)%4 != 0)
+    //panic("ERR: fast from dev skb->data%4 != 0");
+
+    if (skb->indev == NULL)
+        skb->indev = skb->dev;
+    //ÐèҪ׼ȷ¶¨Î»µ½IPÍ·£¬ÆÚ¼ä¿ÉÄÜÌø¹ýppp/mac/pppoeµÈ¸÷ÖÖ²ã2Í·²¿
+    if (set_skbdata_toip(skb) == 1 && fast_for_ipdata(skb))
+        return 1;
+
+    // if fast handling fails, the original skb fields must be restored as below and the packet handed to the standard Linux stack
+    skb->protocol = old_pro;
+    skb->data = old_data;
+    skb->len = old_len;
+    skb->network_header = old_netheader;
+    return 0;
+}
+
+/* Fast-path handling for driver-delivered skbs whose data points at the MAC header; usable for data from any Ethernet-style network device */
+static int try_fast_for_macdata(struct sk_buff *skb, struct net_device *dev)
+{
+    /*
+    struct ethhdr *eth;
+    if (!(skb->network_header == 0 || skb->network_header == ~0U))
+        panic("network_header    ERR!!!!!!!!!!\n");
+    skb->dev = dev;
+    if (skb->indev == NULL)
+        skb->indev = dev;
+    skb_reset_mac_header(skb);
+    eth = eth_hdr(skb);
+    skb->protocol = eth->h_proto;
+    skb_pull(skb, ETH_HLEN);
+    */
+
+    //if (tsp_mirror_handle)
+        //tsp_mirror_handle(skb);
+
+    // in-house TCP/UDP drop/latency recording; requires the in-house app on the server side; if fast forwarding fails, the record must be cleared
+    //net_dbg_perf_dev_recv((char *)skb, skb->dev->name);
+    if (try_fast_for_netcoredata(skb))
+    {
+        return 1;
+    }
+    // in-house TCP/UDP drop/latency recording; requires the in-house app on the server side
+    //net_dbg_perf_clear_last_item(skb);
+
+    //skb_push(skb, ETH_HLEN);
+    return 0;
+}
+
+EXPORT_SYMBOL_GPL(tsp_mirror_handle);
+
+/* Handle notifier-chain events */
+static int fast_event(struct notifier_block *this, unsigned long event, struct net_device *dev)
+{
+    traverse_command_t cmd;
+
+    switch (event) {
+    case NETDEV_DOWN:
+        if (dev)
+        {
+            cmd.cmd = TRAVERSE_CMD_DEV_DOWN;
+            cmd.arg = dev->ifindex;
+
+            fastnat_event(&cmd);
+            fast6_event(&cmd);
+        }
+        break;
+    }
+    return NOTIFY_DONE;
+}
+
+static int fast_device_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+    struct net_device *dev = (struct net_device *)ptr;
+
+    return fast_event(this, event, dev);
+}
+
+static int fast_inet_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+    struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
+
+    return fast_event(this, event, dev);
+}
+
+/*priority should be higher than masquerade, otherwise kernel will hang*/
+static struct notifier_block fast_dev_notifier = {
+    .notifier_call    = fast_device_event,
+    .priority = 1,
+};
+
+/*priority should be higher than masquerade, otherwise kernel will hang*/
+static struct notifier_block fast_inet_notifier = {
+    .notifier_call    = fast_inet_event,
+    .priority = 1,
+};
+
+void fast_device_down_event_by_name(char *dev_name)
+{
+    struct net_device *dev = NULL;
+
+    if (!dev_name)
+    {
+        print_sun(SUN_ERR,"fast_device_down_event_by_name dev_name is null \n");
+        return;
+    }
+
+    dev = dev_get_by_name(&init_net, dev_name);
+    if (!dev)
+    {
+        print_sun(SUN_ERR,"fast_device_down_event_by_name dev not found \n");
+        return;
+    }
+
+    fast_event(NULL, NETDEV_DOWN, dev);
+
+    /*add by jiangjing*/
+    dev_put(dev);
+}
+
+/**** ÒÔϲ¿·ÖÊÇеÄfastģʽʹÓú¯Êý ****/
+extern void fast_local_conn_release(struct nf_conn *ct);
+extern void fast_local_sock_release(struct sock *sk);
+
+
+/* Record the ct associated with fast forwarding on the dst entry's connection list */
+void fast_dst_add_ct(struct dst_entry *dst, struct nf_conn *ct)
+{
+    struct conn_list *entry;
+    int conn_flag = 0;
+
+    list_for_each_entry_rcu(entry, &dst->conn_head, list)
+    {
+        if (entry->nfct == ct)
+        {
+            conn_flag = 1;
+            break;
+        }
+    }
+
+    if (conn_flag == 0)
+    {
+        struct conn_list *conn_list_node =(struct conn_list*)kzalloc(sizeof(struct conn_list), GFP_KERNEL);
+        if(conn_list_node) {
+            rcu_assign_pointer(conn_list_node->nfct, ct);
+            list_add_rcu(&conn_list_node->list, &dst->conn_head);
+        }
+    }
+}
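+/* Note: the lookup and the list_add_rcu() above are not atomic with
+ * respect to each other; callers presumably serialize on fast_fw_spinlock
+ * (as the release paths below do), otherwise duplicate nodes are possible. */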
+
+/* Release forwarding-related state when a net_device goes away */
+void fast_fw_conn_release_by_dev(struct net_device* dev)
+{
+    int hash = 0;
+    struct nf_conntrack_tuple_hash *h;
+    struct hlist_nulls_node *n;
+    struct nf_conn *ct;
+    int dir;
+    struct net_device *net;
+
+    if(fastnat_level == FAST_CLOSE)
+        return ;
+
+    rcu_read_lock();
+    for (hash = 0; hash < nf_conntrack_htable_size; hash++)
+    {
+        local_bh_disable();
+        hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnnode)
+        {
+            if (h)
+            {
+                ct = nf_ct_tuplehash_to_ctrack(h);
+
+                // release the fast-forwarding info tied to this connection
+                spin_lock_bh(&fast_fw_spinlock);
+                for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
+                {
+                    net = ct->fast_ct.fast_brport[dir];
+                    if (net != NULL)
+                    {
+                        if(!strcmp(dev->name, net->name))
+                        {
+                            ct->fast_ct.fast_brport[dir] = NULL;
+                        }
+                    }
+                }
+                spin_unlock_bh(&fast_fw_spinlock);
+            }
+        }
+        local_bh_enable();
+    }
+    rcu_read_unlock();
+}
+/* Release forwarding-related state when a conn is released */
+void fast_fw_conn_release(struct nf_conn *ct)
+{
+    struct dst_entry *dst;
+    struct conn_list *entry;
+    int dir;
+
+    for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
+    {
+        if (!(dst = dst_get_by_ct(ct, dir)))
+            continue;
+
+        list_for_each_entry_rcu(entry, &dst->conn_head, list)
+        {
+            if (entry->nfct == ct)
+            {
+                entry->nfct = NULL;
+                __list_del_entry(&entry->list);
+                kfree(entry);
+                break;
+            }
+        }
+        // dst_get_by_ct() took a hold on the dst, so release it here
+        dst_release(dst);
+        rcu_assign_pointer(ct->fast_ct.fast_dst[dir], NULL);
+        ct->fast_ct.fast_brport[dir] = NULL;
+    }
+    ct->fast_ct.isFast = 0;
+}
+
+// Release forwarding-related state when a dst_entry is released
+void fast_fw_dst_entry_release(struct dst_entry *dst)
+{
+    struct conn_list *entry = NULL;
+    struct conn_list *entry_tmp = NULL;
+    struct nf_conn *ct;
+
+    list_for_each_entry_safe(entry, entry_tmp, &dst->conn_head, list) {
+
+        ct = rcu_dereference_protected(entry->nfct, 1);
+        if (!ct)
+            continue;
+
+        if (ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL] && ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL] == dst) {
+            rcu_assign_pointer(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL], NULL);
+            ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL]   = NULL;
+        }
+        else if (ct->fast_ct.fast_dst[IP_CT_DIR_REPLY] && ct->fast_ct.fast_dst[IP_CT_DIR_REPLY] == dst) {
+            rcu_assign_pointer(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY], NULL);
+            ct->fast_ct.fast_brport[IP_CT_DIR_REPLY]      = NULL;
+        }
+        else
+            print_sun(SUN_ERR,"fast_fw_dst_entry_release \n");
+
+        if (!ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL] && !ct->fast_ct.fast_brport[IP_CT_DIR_REPLY])
+            ct->fast_ct.isFast = 0;
+        entry->nfct = NULL;
+        __list_del_entry(&entry->list);
+        kfree(entry);
+    }
+}
+
+/* conn release notification: tells fast to release the related state */
+void fast_conn_release(struct nf_conn *ct, int mark)
+{
+    spin_lock_bh(&fast_fw_spinlock);
+    if ((ct->fast_ct.isFast == FAST_CT_FW4 || ct->fast_ct.isFast == FAST_CT_FW6) && (mark & RELEASE_ALL_DST))
+    {
+        fast_fw_conn_release(ct);
+    }
+    spin_unlock_bh(&fast_fw_spinlock);
+}
+
+/* dst_entry release notification: tells fast to release the related state */
+void fast_dst_entry_release(struct dst_entry * dst)
+{
+    spin_lock_bh(&fast_fw_spinlock);
+    fast_fw_dst_entry_release(dst);
+    spin_unlock_bh(&fast_fw_spinlock);
+}
+
+/* Release all fast-forwarding info under the new fast mode: only the ct is linked to both dst and sk, so iterate via the ct table */
+void fast_release_all(int mark)
+{
+    int hash = 0;
+    struct nf_conntrack_tuple_hash *h;
+    struct hlist_nulls_node *n;
+    struct nf_conn *ct;
+
+    rcu_read_lock();
+    for (hash = 0; hash < nf_conntrack_htable_size; hash++) {
+        local_bh_disable();
+        hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnnode) {
+            if (h)
+            {
+                ct = nf_ct_tuplehash_to_ctrack(h);
+                if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+                    continue;
+
+                // release the fast-forwarding info tied to this connection
+                fast_conn_release(ct, mark);
+
+                nf_ct_put(ct);
+            }
+        }
+        local_bh_enable();
+    }
+    rcu_read_unlock();
+}
+
+/**** ÒÔϲ¿·ÖÊÇоÉfastģʽ¹²Óõĺ¯Êý ****/
+
+// Install hooks at the different layers according to priority. Current policy: whenever a higher-level hook is installed,
+// the lower-level one is installed too, covering devices that lack the higher-level hook; this only costs a few no-op hook misses and is not a performance bottleneck
+void set_fast_level_cb(int param)
+{
+    // install the externally-called function pointers according to the fast-forwarding level
+    if (param == FAST_CLOSE || param == FAST_CLOSE_KEEP_LINK)  // fast forwarding off
+    {
+        fast_from_softirq = NULL;
+        fast_from_driver = NULL;
+    }
+    else if (param == FAST_NET_CORE) // kernel-core fast forwarding; FAST_NEW only supports invoking the fast path from softirq context
+    {
+        fast_from_softirq = try_fast_for_netcoredata;
+        fast_from_driver = NULL;
+    }
+    // net_device to net_device, without entering the IP softirq
+    else if (param == FAST_NET_DEVICE)
+    {
+        fast_from_softirq = try_fast_for_netcoredata;
+        fast_from_driver = try_fast_for_macdata;
+    }
+    else
+        print_sun(SUN_ERR,"fastnat_level error, should be 0~2!\n");
+}
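+/* Summary of the levels handled above, as implemented:
+ *   FAST_CLOSE / FAST_CLOSE_KEEP_LINK  - both hooks cleared
+ *   FAST_NET_CORE                      - softirq hook only
+ *   FAST_NET_DEVICE                    - softirq hook + driver hook
+ */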
+
+/* ÉèÖø÷×Ó¹¦ÄܵĿìËÙת·¢µ÷Óú¯Êý */
+void set_fast_switch_cb(unsigned long param)
+{
+    // select the IPv4/IPv6 interfaces according to new vs. legacy fast forwarding
+    if (test_bit(FAST_TYPE_VERSION_BIT, &param)) // new fast mode
+    {
+        // new fast mode: set the IPv4 forwarding fast-path callback
+        if (test_bit(FAST_TYPE_FW4_BIT, &fast_switch))
+            fast_fw4_proc = fast4_fw_recv;
+        else
+            fast_fw4_proc = NULL;
+
+        // new fast mode: set the IPv6 forwarding fast-path callback
+        if (test_bit(FAST_TYPE_FW6_BIT, &fast_switch))
+            fast_fw6_proc = fast6_fw_recv;
+        else
+            fast_fw6_proc = NULL;
+
+        fast_local4_proc        = NULL;
+        fast_local4_output_proc = NULL;
+
+        fast_local6_proc        = NULL;
+        fast_local6_output_proc = NULL;
+
+        // set the inter-subnet (bridge) fast-forwarding callback
+        if (test_bit(FAST_TYPE_BR_BIT, &fast_switch))
+            fast_br_proc = fast_br;
+        else
+            fast_br_proc = NULL;
+
+        fast_nat4_proc = NULL;
+        fast_nat6_proc = NULL;
+    }
+    else // legacy fast mode
+    {
+        // legacy fast mode: set the IPv4 fast-path receive callback
+        fast_nat4_proc = fast_nat_recv;
+
+        // legacy fast mode: set the IPv6 fast-path receive callback
+        fast_nat6_proc = fast6_recv;
+
+        fast_br_proc = fast_br;
+        fast_local4_proc         = NULL;
+        fast_local4_output_proc  = NULL;
+        fast_local6_proc         = NULL;
+        fast_local6_output_proc  = NULL;
+        fast_fw4_proc = NULL;
+        fast_fw6_proc = NULL;
+    }
+}
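+/* Callback matrix as implemented above:
+ *   new fast mode:    fast_fw4_proc/fast_fw6_proc follow the FW4/FW6 bits,
+ *                     fast_br_proc follows the BR bit, legacy and local
+ *                     pointers are cleared
+ *   legacy fast mode: fast_nat4_proc/fast_nat6_proc and fast_br_proc are
+ *                     always set, new-mode pointers are cleared
+ */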
+
+void fast_level_change(int new_level)
+{
+    int old_level = 0;
+
+    old_level = fastnat_level;
+
+    if (old_level == new_level)
+        return;
+
+    fastnat_level = new_level;
+
+    //¸ù¾Ý×îеÄfastnat level£¬µ÷Õû»Øµ÷º¯Êý
+    set_fast_level_cb(fastnat_level);
+
+    // fastnat turned off: flush all state
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+        {
+            fastnat_cleanup_links();
+            fast6_cleanup_links();
+        }
+        else
+        {
+            fast_release_all(RELEASE_ALL_DST | RELEASE_ALL_SK);
+        }
+    }
+}
+
+void fast_switch_change(unsigned long new_switch)
+{
+    unsigned long old_switch = fast_switch;
+
+    if (old_switch == new_switch)
+        return;
+
+    fast_switch = new_switch;
+
+    // set the sub-feature callbacks according to the fast-forwarding bitmap switches
+    set_fast_switch_cb(fast_switch);
+
+    // when a sub-feature's fast forwarding is switched from on to off, its fast-forwarding state must be cleared;
+    // switching from the legacy fast mode to the new one must clear the data saved by the legacy mode
+    if (!test_bit(FAST_TYPE_VERSION_BIT, &old_switch) && test_bit(FAST_TYPE_VERSION_BIT, &new_switch))
+    {
+        // the legacy fast mode has no separate IPv4/IPv6 control
+        fastnat_cleanup_links();
+
+        fast6_cleanup_links();
+    }
+    // switching from the new fast mode to the legacy one must clear the data saved by the new mode
+    else if (test_bit(FAST_TYPE_VERSION_BIT, &old_switch) && !test_bit(FAST_TYPE_VERSION_BIT, &new_switch))
+    {
+        fast_release_all(RELEASE_ALL_DST | RELEASE_ALL_SK);
+    }
+
+    if (test_bit(FAST_TYPE_VERSION_BIT, &new_switch) && (!test_bit(FAST_TYPE_FW4_BIT, &new_switch) || !test_bit(FAST_TYPE_FW6_BIT, &new_switch))) {
+        fast_release_all(RELEASE_ALL_DST);
+    }
+    if (test_bit(FAST_TYPE_VERSION_BIT, &new_switch) && (!test_bit(FAST_TYPE_LOCAL4_BIT, &new_switch) || !test_bit(FAST_TYPE_LOCAL6_BIT, &new_switch))) {
+        fast_release_all(RELEASE_ALL_SK);
+    }
+}
+
+// counts how many times a dst with the DST_NOCACHE flag was seen
+int no_cache = 0;
+// counts how many times dst->neighbour was NULL while the dst was in use
+int no_neighbour = 0;
+struct dst_entry * dst_get_by_ct(struct nf_conn * ct, int dir)
+{
+    struct dst_entry *dst;
+
+    rcu_read_lock();
+    dst = rcu_dereference_protected(ct->fast_ct.fast_dst[dir], 1);
+
+    if (dst)
+        dst_hold_and_use(dst, jiffies);
+    rcu_read_unlock();
+    return dst;
+}
+
+/* fast initialization */
+static int __init
+tsp_fast_init(void)
+{
+    int ret4 = 0, ret6 = 0;
+
+    memset(zeromac, 0, sizeof(zeromac));
+
+    spin_lock_init(&fast_fw_spinlock);
+    // use a dedicated slab cache
+    fast_head_cache = kmem_cache_create("fast_head_cache",
+                                        sizeof(struct fast_entry_s),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+                                        NULL);
+
+    // call the IPv4 and IPv6 init functions
+    ret4 = tsp_fastnat_init();
+    ret6 = tsp_fast6_init();
+    fast4_fw_init();
+    fast6_fw_init();
+
+    if ((ret4 != 0) && (ret6 != 0))
+        return -EINVAL;
+
+    /* register the notifier chains */
+    register_netdevice_notifier(&fast_dev_notifier);
+    register_inetaddr_notifier(&fast_inet_notifier);
+
+    // initialize the fast-forwarding and platform proc files
+    set_fast_level_cb(fastnat_level);
+    set_fast_switch_cb(fast_switch);
+    fast_conntrack_init_proc();
+
+    net_adapter_init_proc();
+    return 0;
+}
+
+static void __exit
+tsp_fast_cleanup(void)
+{
+    set_fast_level_cb(FAST_CLOSE);
+    set_fast_switch_cb(0);
+    unregister_netdevice_notifier(&fast_dev_notifier);
+    unregister_inetaddr_notifier(&fast_inet_notifier);
+
+    // call the IPv4 and IPv6 cleanup functions
+    tsp_fastnat_cleanup();
+    tsp_fast6_cleanup();
+    fast4_fw_cleanup();
+    fast6_fw_cleanup();
+}
+
+late_initcall(tsp_fast_init);
+module_exit(tsp_fast_cleanup);
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast_track.c b/upstream/linux-5.10/net/core/fastproc/fast_track.c
new file mode 100755
index 0000000..37f3cfc
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast_track.c
@@ -0,0 +1,1203 @@
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>    /* Necessary because we use proc fs */
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/netdevice.h>
+#include <linux/security.h>
+#include <net/net_namespace.h>
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+#include <linux/rculist_nulls.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/SI/fastnat.h>
+#include <net/SI/fast6.h>
+#include <net/SI/fast_common.h>
+#include <net/SI/netioc_proc.h>
+
+
+#define PORT_LEN 10
+extern int fastnat_ack_param;
+extern int ackdrop_maxnum;
+extern unsigned int ct_iptables_syn_sw;
+
+
+static unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
+{
+    struct nf_conn_acct *acct;
+    struct nf_conn_counter *counter;
+
+    acct = nf_conn_acct_find(ct);
+    if (!acct)
+        return 0;
+
+    counter = acct->counter;
+    seq_printf(s, "packets=%llu bytes=%llu ",
+               (unsigned long long)atomic64_read(&counter[dir].packets),
+               (unsigned long long)atomic64_read(&counter[dir].bytes));
+
+    return 0;
+}
+
+static void *fastnat_level_seq_start(struct seq_file *seq, loff_t *pos)
+__acquires(RCU)
+{
+    if (*pos >= 1)
+        return NULL;
+    return SEQ_START_TOKEN;
+}
+
+static void *fastnat_level_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+    (*pos)++;
+    return NULL;
+}
+
+static void fastnat_level_seq_stop(struct seq_file *s, void *v)
+__releases(RCU)
+{
+    return;
+}
+
+/* return 0 on success, 1 in case of error */
+static int fastnat_level_seq_show(struct seq_file *s, void *v)
+{
+    seq_printf(s, "fastnat_level: %d\n", fastnat_level);
+    return 0;
+}
+
+static const struct seq_operations fastnat_level_seq_ops = {
+    .start = fastnat_level_seq_start,
+    .next  = fastnat_level_seq_next,
+    .stop  = fastnat_level_seq_stop,
+    .show  = fastnat_level_seq_show
+};
+
+static int fastnat_level_open(struct inode *inode, struct file *file)
+{
+    return seq_open(file, &fastnat_level_seq_ops);
+}
+

+// Reference implementation for user write commands; the same pattern can be used to configure associations, interface names, etc.
+static ssize_t fastnat_level_set(struct file *file,
+                                 const char __user *buffer, size_t count, loff_t *pos)
+{
+    char char_fastnat[5] = {0};
+    int level = 0;
+
+    // count includes a trailing terminator; only a single digit is supported
+    if (count != 2)
+        return -EINVAL;
+
+    if (copy_from_user(char_fastnat, buffer, 1))
+        return -EFAULT;
+
+    // only levels 0-2 and 5 are accepted
+    if ((char_fastnat[0] < '0' || char_fastnat[0] > '2') && (char_fastnat[0] != '5'))
+        return -EINVAL;
+
+    level = (int)(char_fastnat[0] - '0');
+
+    // reinstall the fast-forwarding hooks
+    fast_level_change(level);
+    return count;
+}
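+/* Usage sketch (illustrative; the exact proc path is created elsewhere by
+ * fast_conntrack_init_proc() and may differ per platform):
+ *
+ *     echo 2 > /proc/.../fastnat_level    # FAST_NET_DEVICE: driver-level fast path
+ *     cat /proc/.../fastnat_level         # -> "fastnat_level: 2"
+ */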

+
+static void *fast_switch_seq_start(struct seq_file *seq, loff_t *pos)
+__acquires(RCU)
+{
+    if (*pos >= 1)
+        return NULL;
+    return SEQ_START_TOKEN;
+}
+
+static void *fast_switch_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+    (*pos)++;
+    return NULL;
+}
+
+static void fast_switch_seq_stop(struct seq_file *s, void *v)
+__releases(RCU)
+{
+    return;
+}
+
+/* return 0 on success, 1 in case of error */
+static int fast_switch_seq_show(struct seq_file *s, void *v)
+{
+    seq_printf(s, "fast_switch: 0x%x\n", (unsigned int)fast_switch);
+    return 0;
+}
+
+static const struct seq_operations fast_switch_seq_ops = {
+    .start = fast_switch_seq_start,
+    .next  = fast_switch_seq_next,
+    .stop  = fast_switch_seq_stop,
+    .show  = fast_switch_seq_show
+};
+
+static int fast_switch_open(struct inode *inode, struct file *file)
+{
+    return seq_open(file, &fast_switch_seq_ops);
+}
+
+// Reference implementation for user write commands
+static ssize_t fast_switch_set(struct file *file,
+                               const char __user *buffer, size_t count, loff_t *pos)
+{
+    char char_fastnat[5] = {0};
+    int level = 0, i = 0;
+
+    // count includes a trailing terminator; at most four decimal digits are supported
+    if (count > 5)
+        return -EINVAL;
+
+    if (copy_from_user(char_fastnat, buffer, count))
+        return -EFAULT;
+
+    for(i = 0; i < count - 1; i++) {
+        if(char_fastnat[i] < '0' || char_fastnat[i] > '9')
+            return -EINVAL;
+        level = (int)(char_fastnat[i] - '0') + level*10;
+    }
+
+    // reinstall the fast-forwarding hooks
+    fast_switch_change(level);
+    return count;
+}

+
+static void *fastbr_level_seq_start(struct seq_file *seq, loff_t *pos)
+__acquires(RCU)
+{
+    if (*pos >= 1)
+        return NULL;
+    return SEQ_START_TOKEN;
+}
+
+static void *fastbr_level_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+    (*pos)++;
+    return NULL;
+}
+
+static void fastbr_level_seq_stop(struct seq_file *s, void *v)
+__releases(RCU)
+{
+    return;
+}
+
+/* return 0 on success, 1 in case of error */
+static int fastbr_level_seq_show(struct seq_file *s, void *v)
+{
+    seq_printf(s, "fastbr_level: %d\n", fastbr_level);
+    return 0;
+}
+
+static const struct seq_operations fastbr_level_seq_ops = {
+    .start = fastbr_level_seq_start,
+    .next  = fastbr_level_seq_next,
+    .stop  = fastbr_level_seq_stop,
+    .show  = fastbr_level_seq_show
+};
+
+static int fastbr_level_open(struct inode *inode, struct file *file)
+{
+    return seq_open(file, &fastbr_level_seq_ops);
+}
+
+// Reference implementation for user write commands
+static ssize_t fastbr_level_set(struct file *file,
+                                const char __user *buffer, size_t count, loff_t *pos)
+{
+    char char_fastbr[5] = {0};
+
+    // count includes a trailing terminator; only 0 or 1 is supported
+    if (count != 2)
+        return -EINVAL;
+
+    if (copy_from_user(char_fastbr, buffer, 1))
+        return -EFAULT;
+
+    if (char_fastbr[0] < '0' || char_fastbr[0] > '1')
+        return -EINVAL;
+
+    fastbr_level = (int)(char_fastbr[0] - '0');
+
+    return count;
+}

+

+static void *fastnat_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    spin_lock_bh(&fastnat_spinlock);

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+    {

+        if (*pos == 0)

+        {

+            seq_printf(seq, "fastnat have %d conn!!!\nskb_num4:%d,fastnat_num:%d\n",

+                       working_list.count, skb_num4, fastnat_num);

+            seq_printf(seq, "fastbr_sum:%d,fastbr_num:%d\n",

+                       skb_num4 + skb_num6 + skb_unknown - fastnat_num - fast6_num, fastbr_num);

+

+            if ((fastnat_ack_param == 1) && (ackdrop_maxnum  >= 1))

+            {

+                seq_printf(seq, "fastnat ack_delay_stats : total_count = %u, forword_count = %u, drop_count = %u, "

+                           "timeout_xmit_count = %u, timeout_drop_count = %u\n",

+                           (unsigned int)ack_delay_stats.total_count, (unsigned int)ack_delay_stats.forword_count,

+                           (unsigned int)ack_delay_stats.drop_count, (unsigned int)ack_delay_stats.timeout_xmit_count,

+                           (unsigned int)ack_delay_stats.timeout_drop_count);

+            }

+        }

+        return &working_hash[*pos];

+    }

+

+}

+

+static void *fastnat_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    //return fastnat_get_next(s, v);

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+        return &working_hash[*pos];

+}

+

+

+static void fastnat_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    spin_unlock_bh(&fastnat_spinlock);

+}

+

+/* return 0 on success, 1 in case of error */

+static int fastnat_seq_show(struct seq_file *s, void *v)

+{

+    struct hlist_nulls_head    *head = (struct hlist_nulls_head *) v;

+    struct nf_conntrack_tuple_hash *h;

+    struct hlist_nulls_node *n;

+    fast_entry_data_t *nat_entry_data;

+    fast_entry_t *nat_entry = NULL;


+    const struct nf_conntrack_l4proto *l4proto;


+

+    hlist_nulls_for_each_entry(h, n, head, hnnode)

+    {

+        nat_entry_data = fast_hash_to_data(h);

+        nat_entry = fast_data_to_entry(nat_entry_data);

+

+        if (unlikely(!atomic_inc_not_zero(&nat_entry->ct->ct_general.use)))

+            return 0;

+

+        /* we only want to print DIR_ORIGINAL */

+        if (NF_CT_DIRECTION(h))

+        {

+            nf_ct_put(nat_entry->ct);

+            continue;

+        }

+

+        l4proto = nf_ct_l4proto_find(nf_ct_protonum(nat_entry->ct));

+

+


+        seq_printf(s, "l3proto: %u l4proto: %u %lu %lu %lu ",

+                   nf_ct_l3num(nat_entry->ct), nf_ct_protonum(nat_entry->ct),

+                   (unsigned long)(nat_entry->timeout.expires/HZ),

+                   (unsigned long)(tcp_timeouts[nat_entry->ct->proto.tcp.state]/HZ), (unsigned long)(jiffies/HZ));

+        //tcp_conntrack_names only defines names up to TCP_CONNTRACK_MAX, while state can still hold states beyond that

+        if (nat_entry->ct->proto.tcp.state < TCP_CONNTRACK_MAX)

+        {

+#ifdef CONFIG_NF_CONNTRACK_PROCFS

+            if (l4proto->print_conntrack)

+            {

+                l4proto->print_conntrack(s, nat_entry->ct);

+            }

+#endif

+        }

+

+        print_tuple(s, &nat_entry->ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, nat_entry->ct, IP_CT_DIR_ORIGINAL))

+        {

+            nf_ct_put(nat_entry->ct);

+            continue;

+        }

+

+        if (!(test_bit(IPS_SEEN_REPLY_BIT, &nat_entry->ct->status)))

+            seq_printf(s, "[UNREPLIED] ");

+

+

+        print_tuple(s, &nat_entry->ct->tuplehash[IP_CT_DIR_REPLY].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, nat_entry->ct, IP_CT_DIR_REPLY))

+        {

+            nf_ct_put(nat_entry->ct);

+            continue;

+        }

+

+        if (test_bit(IPS_ASSURED_BIT, &nat_entry->ct->status))

+            seq_printf(s, "[ASSURED] ");

+

+

+        seq_printf(s, "NAT_ip=%pI4 NAT_port==%hu  \n",&nat_entry->data[IP_CT_DIR_ORIGINAL].nat_addr,ntohs(nat_entry->data[IP_CT_DIR_ORIGINAL].nat_port));

+        nf_ct_put(nat_entry->ct);

+    }

+

+    return 0;

+}

+

+

+static const struct seq_operations fastnat_seq_ops = {

+    .start = fastnat_seq_start,

+    .next  = fastnat_seq_next,

+    .stop  = fastnat_seq_stop,

+    .show  = fastnat_seq_show

+};

+

+static int fastnat_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &fastnat_seq_ops);

+}
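+/* Illustrative read of /proc/net/fastnat (registered below; numbers are
+ * examples only):
+ *   fastnat have 12 conn!!!
+ *   skb_num4:3456,fastnat_num:3400
+ *   fastbr_sum:80,fastbr_num:76
+ *   l3proto: 2 l4proto: 6 ... [ASSURED] NAT_ip=10.0.0.1 NAT_port=5000
+ */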

+

+static void *fast6_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    spin_lock_bh(&fast6_spinlock);

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+    {

+        if (*pos == 0)

+        {

+#if 0

+            seq_printf(seq, "fastnat ack_delay_stats : total_count = %d, forword_count = %d, drop_count = %d, "

+                       "timeout_xmit_count = %d, timeout_drop_count = %d\n",

+                       ack_delay_stats.total_count, ack_delay_stats.forword_count, ack_delay_stats.drop_count,

+                       ack_delay_stats.timeout_xmit_count, ack_delay_stats.timeout_drop_count);

+            seq_printf(seq, "fastnat have %d conn!!!\nfastnat_recv_count:%d,fastnat_real_count:%d\n",

+                       working_list.count,fastnat_recv_count,fastnat_real_count);

+            seq_printf(seq, "send_2_ps_failed:%u, send_2_usb_failed:%u\n", send_2_ps_failed, send_2_usb_failed);

+#endif

+            seq_printf(seq, "fast6 have %d conn!!!\nskb_num6:%d,fast6_num:%d\n",

+                       working_list6.count, skb_num6, fast6_num);

+        }

+        return &working_hash6[*pos];

+    }

+

+}

+

+static void *fast6_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+        return &working_hash6[*pos];

+}

+

+

+static void fast6_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    spin_unlock_bh(&fast6_spinlock);

+}

+

+/* return 0 on success, 1 in case of error */

+static int fast6_seq_show(struct seq_file *s, void *v)

+{

+    struct hlist_nulls_head    *head = (struct hlist_nulls_head *) v;

+    struct nf_conntrack_tuple_hash *h;

+    struct hlist_nulls_node *n;

+    fast_entry_data_t *fast6_entry_data;

+    fast_entry_t *fast6_entry = NULL;


+    const struct nf_conntrack_l4proto *l4proto;


+

+    hlist_nulls_for_each_entry(h, n, head, hnnode)

+    {

+        fast6_entry_data = fast_hash_to_data(h);

+        fast6_entry = fast_data_to_entry(fast6_entry_data);

+

+        if (unlikely(!atomic_inc_not_zero(&fast6_entry->ct->ct_general.use)))

+            return 0;

+

+        /* we only want to print DIR_ORIGINAL */

+        if (NF_CT_DIRECTION(h))

+        {

+            nf_ct_put(fast6_entry->ct);

+            continue;

+        }

+

+        l4proto = nf_ct_l4proto_find(nf_ct_protonum(fast6_entry->ct));

+


+        seq_printf(s, "l3proto: %u l4proto: %u %lu %lu %lu ",

+                   nf_ct_l3num(fast6_entry->ct), nf_ct_protonum(fast6_entry->ct),

+                   (unsigned long)(fast6_entry->timeout.expires/HZ),

+                   (unsigned long)(tcp_timeouts[fast6_entry->ct->proto.tcp.state]/HZ), (unsigned long)(jiffies/HZ));

+

+        //tcp_conntrack_names only defines names up to TCP_CONNTRACK_MAX, while state can still hold states beyond that

+        if (fast6_entry->ct->proto.tcp.state < TCP_CONNTRACK_MAX)

+        {

+#ifdef CONFIG_NF_CONNTRACK_PROCFS

+            if (l4proto->print_conntrack)

+            {

+                l4proto->print_conntrack(s, fast6_entry->ct);

+            }

+#endif

+        }

+

+        print_tuple(s, &fast6_entry->ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, fast6_entry->ct, IP_CT_DIR_ORIGINAL))

+        {

+            nf_ct_put(fast6_entry->ct);

+            continue;

+        }

+

+        if (!(test_bit(IPS_SEEN_REPLY_BIT, &fast6_entry->ct->status)))

+            seq_printf(s, "[UNREPLIED] ");

+

+        print_tuple(s, &fast6_entry->ct->tuplehash[IP_CT_DIR_REPLY].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, fast6_entry->ct, IP_CT_DIR_REPLY))

+        {

+            nf_ct_put(fast6_entry->ct);

+            continue;

+        }

+

+        if (test_bit(IPS_ASSURED_BIT, &fast6_entry->ct->status))

+            seq_printf(s, "[ASSURED] ");

+

+

+        seq_printf(s, "\n");

+        nf_ct_put(fast6_entry->ct);

+    }

+

+    return 0;

+}

+

+static const struct seq_operations fast6_seq_ops = {

+    .start = fast6_seq_start,

+    .next  = fast6_seq_next,

+    .stop  = fast6_seq_stop,

+    .show  = fast6_seq_show

+};

+

+static int fast6_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &fast6_seq_ops);

+}
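+/* Illustrative read of /proc/net/fast6 (registered below; numbers are
+ * examples only):
+ *   fast6 have 4 conn!!!
+ *   skb_num6:120,fast6_num:118
+ * followed by one tuple line per ORIGINAL-direction entry.
+ */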

+

+//Reference code for handling user writes; usable for configuring related settings, interface names and similar operations

+static ssize_t dev_down_set(struct file *file,

+                            const char __user *buffer, size_t count, loff_t *pos)

+{

+    size_t size;

+    char dev_name[MAX_NET_DEVICE_NAME_LEN + 1] = {0};

+

+    //count includes one trailing terminator

+    size = min_t(size_t, count - 1, MAX_NET_DEVICE_NAME_LEN);

+    if (copy_from_user(dev_name, buffer, size))

+        return -EFAULT;

+

+    //ɾ³ý´ËÍøÂçÉ豸Ïà¹Øipv4,ipv6Á´½Ó

+    fast_device_down_event_by_name(dev_name);

+

+    return count;

+}
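+/* Illustrative trigger for the handler above (interface name is an example):
+ *   echo wan0 > /proc/net/dev_down   # drop this device's ipv4/ipv6 fast links
+ */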

+

+static void *nofast_port_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    return SEQ_START_TOKEN; /* any non-NULL token; ->show ignores v */

+}

+

+static void *nofast_port_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;

+}

+

+static void nofast_port_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;

+}

+

+static int nofast_port_seq_show(struct seq_file *s, void *v)

+{

+    int i = 0;

+

+    if (nofast_port[0] == 0)

+    {

+        seq_printf(s, "All ports support fast! \n");

+    }

+    else

+    {

+        seq_printf(s, "Not supported ports include:\n%d", nofast_port[0]);

+

+        for (i = 1; i < NOFAST_PROTO_MAX; i++)

+        {

+            //a port number of 0 ends the list

+            if (nofast_port[i] == 0)

+                break;

+            seq_printf(s, "+%d", nofast_port[i]);

+        }

+        seq_printf(s, "\n\n");

+    }

+    return 0;

+}

+

+static const struct seq_operations nofast_port_seq_ops = {

+    .start = nofast_port_seq_start,

+    .next  = nofast_port_seq_next,

+    .stop  = nofast_port_seq_stop,

+    .show  = nofast_port_seq_show

+};

+

+static int nofast_port_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &nofast_port_seq_ops);

+}

+

+struct nf_conntrack_tuple tuple_info;

+/*

+1~6: query fast link info

+8: query skb free points

+9: query socket tracking info

+*/

+int getconn_type = 0;

+

+static void *conn_datainfo_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    seq_printf(seq, "start fast4 count:%ld, fw:%ld, local4_recv:%ld, local4_output:%ld\n",

+               (long)skb_num4, (long)fastnat_num, (long)fast_local4_rcv_num, (long)fast_local4_output_num);

+    seq_printf(seq, "start fast6 count:%ld, fw:%ld, local6_recv:%ld, local6_output:%ld\n",

+               (long)skb_num6, (long)fast6_num, (long)fast_local6_rcv_num, (long)fast_local6_output_num);

+    return SEQ_START_TOKEN; /* any non-NULL token; ->show ignores v */

+}

+

+static void *conn_datainfo_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;

+}

+

+static void conn_datainfo_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;

+}

+

+static int conn_datainfo_seq_show(struct seq_file *s, void *v)

+{

+    int i = 0, j = 0;

+    struct nf_conntrack_tuple_hash *h;

+    struct nf_conntrack_tuple_hash *h_rdir;

+    struct nf_conn * ct;

+    struct hlist_nulls_node *n;

+

+    for(i = 0; i < nf_conntrack_htable_size; i++) {

+        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[i], hnnode) {

+            if(h->tuple.dst.dir != IP_CT_DIR_ORIGINAL)

+                continue;

+            if(tuple_info.dst.protonum && tuple_info.dst.protonum != h->tuple.dst.protonum)

+                continue;

+            if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.ip6, h->tuple.dst.u3.ip6, 16) != 0)

+                continue;

+            if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.ip6, h->tuple.src.u3.ip6, 16) != 0)

+                continue;

+            if(tuple_info.dst.u.all && tuple_info.dst.u.all != h->tuple.dst.u.all) {

+                continue;

+            }

+            if(tuple_info.src.u.all && tuple_info.src.u.all != h->tuple.src.u.all) {

+                continue;

+            }

+

+            ct = container_of(h, struct nf_conn, tuplehash[h->tuple.dst.dir]);

+

+            spin_lock_bh(&fast_fw_spinlock);

+            if(getconn_type && getconn_type != ct->fast_ct.isFast)

+            {

+                spin_unlock_bh(&fast_fw_spinlock);

+                continue;

+            }

+            if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))

+            {

+                spin_unlock_bh(&fast_fw_spinlock);

+                continue;

+            }

+

+            h_rdir = &ct->tuplehash[IP_CT_DIR_REPLY];

+            if(h->tuple.src.l3num == AF_INET) {

+                /*seq_printf(s, "ctinfo protonum: %d  Original sip: %08x, sport: %d, dip: %08x, dport: %d, packets: %lu , bytes: %lu;",

+                    h->tuple.dst.protonum, ntohl(h->tuple.src.u3.ip), ntohs(h->tuple.src.u.all), ntohl(h->tuple.dst.u3.ip), ntohs(h->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_ORIGINAL].packets, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes);

+                seq_printf(s, "    reply sip: %08x, sport: %d, dip: %08x, dport: %d, packets: %lu , bytes: %lu\n",

+                    ntohl(h_rdir->tuple.src.u3.ip), ntohs(h_rdir->tuple.src.u.all), ntohl(h_rdir->tuple.dst.u3.ip), ntohs(h_rdir->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_REPLY].packets, ct->packet_info[IP_CT_DIR_REPLY].bytes);*/

+                seq_printf(s, "ctinfo protonum: %d  Original sip: %08x, sport: %d, dip: %08x, dport: %d;",

+                           h->tuple.dst.protonum, ntohl(h->tuple.src.u3.ip), ntohs(h->tuple.src.u.all), ntohl(h->tuple.dst.u3.ip), ntohs(h->tuple.dst.u.all));

+                seq_printf(s, "    reply sip: %08x, sport: %d, dip: %08x, dport: %d\n",

+                           ntohl(h_rdir->tuple.src.u3.ip), ntohs(h_rdir->tuple.src.u.all), ntohl(h_rdir->tuple.dst.u3.ip), ntohs(h_rdir->tuple.dst.u.all));

+            }

+            else if(h->tuple.src.l3num == AF_INET6) {

+                /*seq_printf(s, "ctinfo  protonum: %d  Original sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: %lu , bytes: %lu;",

+                    h->tuple.dst.protonum, ntohs(h->tuple.src.u3.in6.s6_addr16[0]), ntohs(h->tuple.src.u3.in6.s6_addr16[1]), ntohs(h->tuple.src.u3.in6.s6_addr16[2]), ntohs(h->tuple.src.u3.in6.s6_addr16[3]),

+                    ntohs(h->tuple.src.u3.in6.s6_addr16[4]), ntohs(h->tuple.src.u3.in6.s6_addr16[5]), ntohs(h->tuple.src.u3.in6.s6_addr16[6]), ntohs(h->tuple.src.u3.in6.s6_addr16[7]), ntohs(h->tuple.src.u.all),

+                    ntohs(h->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h->tuple.dst.u3.in6.s6_addr16[3]),

+                    ntohs(h->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_ORIGINAL].packets, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes);

+                seq_printf(s, "    Reply sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: %lu , bytes: %lu\n",

+                    ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[3]),

+                    ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.src.u.all),

+                    ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[3]),

+                    ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_REPLY].packets, ct->packet_info[IP_CT_DIR_REPLY].bytes);*/

+                seq_printf(s, "ctinfo  protonum: %d  Original sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: xx , bytes: xx;",

+                           h->tuple.dst.protonum, ntohs(h->tuple.src.u3.in6.s6_addr16[0]), ntohs(h->tuple.src.u3.in6.s6_addr16[1]), ntohs(h->tuple.src.u3.in6.s6_addr16[2]), ntohs(h->tuple.src.u3.in6.s6_addr16[3]),

+                           ntohs(h->tuple.src.u3.in6.s6_addr16[4]), ntohs(h->tuple.src.u3.in6.s6_addr16[5]), ntohs(h->tuple.src.u3.in6.s6_addr16[6]), ntohs(h->tuple.src.u3.in6.s6_addr16[7]), ntohs(h->tuple.src.u.all),

+                           ntohs(h->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h->tuple.dst.u3.in6.s6_addr16[3]),

+                           ntohs(h->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h->tuple.dst.u.all));

+                seq_printf(s, "    Reply sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: xx , bytes: xx\n",

+                           ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[3]),

+                           ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.src.u.all),

+                           ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[3]),

+                           ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.dst.u.all));

+            }

+            if(ct->fast_ct.isFast == FAST_CT_LOCAL6 || ct->fast_ct.isFast == FAST_CT_LOCAL4) {

+                seq_printf(s, "ctinfo ->ISFAST: %d, sk: %#llx\n", ct->fast_ct.isFast, (UINT64)ct->fast_ct.sk);

+            } else if(ct->fast_ct.isFast == FAST_CT_FW6 || ct->fast_ct.isFast == FAST_CT_FW4) {

+                seq_printf(s, "ctinfo ->ISFAST: %d", ct->fast_ct.isFast);

+                if(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL])

+                    seq_printf(s, "    Original fast_dst: %#llx", (UINT64)ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL]);

+                if(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY])

+                    seq_printf(s, "    Reply fast_dst: %#llx", (UINT64)ct->fast_ct.fast_dst[IP_CT_DIR_REPLY]);

+                if(ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL])

+                    seq_printf(s, "    Original fast_brport: %#llx", (UINT64)ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL]);

+                if(ct->fast_ct.fast_brport[IP_CT_DIR_REPLY])

+                    seq_printf(s, "    Reply fast_brport: %#llx", (UINT64)ct->fast_ct.fast_brport[IP_CT_DIR_REPLY]);

+                seq_printf(s, "\n");

+            }

+

+            spin_unlock_bh(&fast_fw_spinlock);

+            nf_ct_put(ct);

+        }

+    }

+    return 0;

+}

+

+static const struct seq_operations conn_datainfo_seq_ops= {

+    .start = conn_datainfo_seq_start,

+    .next  = conn_datainfo_seq_next,

+    .stop  = conn_datainfo_seq_stop,

+    .show  = conn_datainfo_seq_show

+

+};

+

+static int conn_datainfo_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &conn_datainfo_seq_ops);

+}

+

+//Convert a decimal string to an integer

+static int str2int(char *str)

+{

+    int i = 0, value = 0, negative = 1;

+    int len = strlen(str);

+

+    for (i = 0; i < len; i++)

+    {

+        //skip leading spaces

+        if ((value == 0) && (str[i] == ' '))

+            continue;

+

+        //the first valid character may be a minus sign

+        if ((negative == 1) && (str[i] == '-'))

+        {

+            negative = -1;

+            continue;

+        }

+

+        //stop at the first non-decimal character

+        if (str[i] < '0' || str[i] > '9')

+            break;

+        value = value * 10 + (str[i] - '0');

+    }

+    return value * negative;

+}
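+/* Note: for strictly formatted input the kernel's kstrtoint() would do; this
+ * hand-rolled variant additionally skips leading spaces and stops quietly at
+ * the first non-digit, e.g. str2int(" -42x") returns -42. */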

+

+static void parse_nofast_port(const char *str, char split)

+{

+    char *p = NULL;

+    const char *pre = str; //keep the input string's const qualifier

+    char portStr[PORT_LEN] = {0}; //protocol port numbers go up to 65535

+    int count = 0, port = 0, len = 0;

+

+    memset(nofast_port, 0, NOFAST_PROTO_MAX * sizeof(nofast_port[0]));

+

+    for (; (p = strchr(pre, split)) != NULL; pre = p + 1)

+    {

+        //the first character is the separator itself

+        if (p == pre)

+            continue;

+

+        memset(portStr, 0, PORT_LEN);

+        len = min_t(int, p - pre, PORT_LEN - 1);

+        snprintf(portStr, len + 1, "%s", pre);

+        port = str2int(portStr);

+        if (port <= 0 || port > 65535) //¶Ë¿ÚºÅ×î´ó65535

+        {

+            continue;

+        }

+        nofast_port[count++] = port;

+        if (count == NOFAST_PROTO_MAX)

+            return;

+    }

+

+    if (*pre != '\0') //the string does not end with a separator

+    {

+        memset(portStr, 0, PORT_LEN);

+        len = min_t(int, str + strlen(str) - pre, PORT_LEN - 1);

+        snprintf(portStr, len + 1, "%s", pre);

+        port = str2int(portStr);

+        if (port <= 0 || port > 65535) //¶Ë¿ÚºÅ×î´ó65535

+        {

+            return;

+        }

+        nofast_port[count++] = port;

+    }

+}
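+/* Worked example for the parser above (input values are illustrative):
+ * parse_nofast_port("21+8080+70000", '+') leaves nofast_port = {21, 8080, 0, ...};
+ * 70000 is dropped for exceeding 65535, and the first 0 entry ends the list. */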

+

+//Reference code for handling user writes; usable for configuring related settings, interface names and similar operations

+static ssize_t nofast_port_set(struct file *file,

+                               const char __user *buffer, size_t count, loff_t *pos)

+{

+    size_t size;

+    char proto[1024] = {0};

+

+    //count includes one trailing terminator

+    size = min_t(size_t, count - 1, sizeof(proto) - 1); //leave room for the NUL terminator

+    if (copy_from_user(proto, buffer, size))

+        return -EFAULT;

+

+    //parse the string

+    parse_nofast_port(proto, '+');

+

+    return count;

+}

+

+extern int in4_pton(const char *src, int srclen,

+                    u8 *dst,

+                    int delim, const char **end);

+extern int in6_pton(const char *src, int srclen,

+                    u8 *dst,

+                    int delim, const char **end);

+

+

+static void conn_datainfo_get_str(char *str, char *start, char *end) {

+    strncat(str, start, end - start);

+    str[end - start] = '\0';

+}

+

+/***************************************

+Input format: src_ip+src_port+dst_ip+dst_port+l4_proto+fast_link_type (see enum conn_fast_type)

+Example:      192.168.0.100+1111+192.168.30.102+2222+6+4

+Note:         an omitted field defaults to matching anything

+              +++++ : dump information for all links

+***************************************/

+static ssize_t conn_datainfo_set(struct file *file,

+                                 const char __user *buffer, size_t count, loff_t *pos)

+{

+    char tuple[1024] = "";

+    int i = 0;

+    char *split[5];

+    char sip[40] = "";

+    char sport[6] = "";

+    char dip[40] = "";

+    char dport[6] = "";

+    char protonum[6] = "";

+    char conn_type[6] = "";

+    const char *end;

+

+    tuple[1023] = '\0';

+    if (strncpy_from_user(tuple, buffer, min_t(size_t, count, sizeof(tuple) - 1)) <= 0) {

+        return -EFAULT;

+    }

+    memset(&tuple_info, 0,sizeof(struct nf_conntrack_tuple));

+    for(i = 0; i < 5; i++) {

+        if(i == 0)

+            split[i] = strchr(tuple, '+');

+        else

+            split[i] = strchr(split[i-1] + 1, '+');

+        if(!split[i])

+            goto err_out;

+        switch(i) {

+        case 0:

+            conn_datainfo_get_str(sip, tuple, split[i]);

+            break;

+        case 1:

+            conn_datainfo_get_str(sport, split[i-1] + 1, split[i]);

+            break;

+        case 2:

+            conn_datainfo_get_str(dip, split[i-1] + 1, split[i]);

+            break;

+        case 3:

+            conn_datainfo_get_str(dport, split[i-1] + 1, split[i]);

+            break;

+        case 4:

+            conn_datainfo_get_str(protonum, split[i-1] + 1, split[i]);

+            break;

+        default:

+            goto err_out;

+        }

+    }

+    strncat(conn_type, split[i-1] + 1, sizeof(conn_type)-strlen(conn_type)-1);

+    if(strlen(sip) > 0) {

+        if(strchr(sip,'.') != NULL && in4_pton(sip, strlen(sip), (u8 *)&tuple_info.src.u3.in, -1, &end) != 1)

+            goto err_out;

+        else if(strchr(sip,':') != NULL && in6_pton(sip, strlen(sip), (u8 *)&tuple_info.src.u3.in6, -1, &end) != 1)

+            goto err_out;

+    }

+    if(strlen(sport) > 0) {

+        for(i = 0; i < strlen(sport); i++) {

+            if(sport[i] < '0' || sport[i] > '9')

+                goto err_out;

+            tuple_info.src.u.all = sport[i] - '0' + tuple_info.src.u.all*10;

+        }

+        if(tuple_info.src.u.all > 65535)

+            goto err_out;

+        tuple_info.src.u.all = htons(tuple_info.src.u.all);

+    }

+    if(strlen(dip) > 0) {

+        if(strchr(dip,'.') != NULL && in4_pton(dip, strlen(dip), (u8 *)&tuple_info.dst.u3.in, -1, &end) != 1)

+            goto err_out;

+        else if(strchr(dip,':') != NULL && in6_pton(dip, strlen(dip), (u8 *)&tuple_info.dst.u3.in6, -1, &end) != 1)

+            goto err_out;

+    }

+    if(strlen(dport) > 0) {

+        for(i = 0; i < strlen(dport); i++) {

+            if(dport[i] < '0' || dport[i] > '9')

+                goto err_out;

+            tuple_info.dst.u.all = dport[i] - '0' + tuple_info.dst.u.all*10;

+        }

+        if(tuple_info.dst.u.all > 65535)

+            goto err_out;

+        tuple_info.dst.u.all = htons(tuple_info.dst.u.all);

+    }

+    if(strlen(protonum) > 0) {

+        for(i = 0; i < strlen(protonum); i++) {

+            if(protonum[i] < '0' || protonum[i] > '9')

+                goto err_out;

+            tuple_info.dst.protonum = protonum[i] - '0' + tuple_info.dst.protonum*10;

+        }

+    }

+    if(strlen(conn_type) > 0) {

+        getconn_type = 0;

+        for(i = 0; i < strlen(conn_type) - 1; i++) {

+            if(conn_type[i] < '0' || conn_type[i] > '9')

+                goto err_out;

+            getconn_type = conn_type[i] - '0' + getconn_type*10;

+        }

+    }

+    return count;

+err_out:

+    memset(&tuple_info, 0,sizeof(struct nf_conntrack_tuple));

+    getconn_type = 0;

+    return -EFAULT;

+}
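+/* Illustrative queries against /proc/net/conn_datainfo (registered below;
+ * addresses and ports are examples only):
+ *   echo "192.168.0.100+1111+192.168.30.102+2222+6+4" > /proc/net/conn_datainfo
+ *   echo "+++++" > /proc/net/conn_datainfo   # wildcard every field
+ *   cat /proc/net/conn_datainfo              # dump the matching links
+ */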

+

+extern int pkt_lost_track;

+static void *pkt_lostinfo_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    return SEQ_START_TOKEN; /* any non-NULL token; ->show ignores v */

+}

+

+static void *pkt_lostinfo_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;

+}

+

+static void pkt_lostinfo_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;

+}

+

+static int pkt_lostinfo_seq_show(struct seq_file *s, void *v)

+{

+    return 0;

+

+}

+

+static const struct seq_operations pkt_lostinfo_seq_ops= {

+    .start = pkt_lostinfo_seq_start,

+    .next  = pkt_lostinfo_seq_next,

+    .stop  = pkt_lostinfo_seq_stop,

+    .show  = pkt_lostinfo_seq_show,

+};

+

+static int pkt_lostinfo_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &pkt_lostinfo_seq_ops);

+}

+

+static ssize_t pkt_lostinfo_set(struct file *file,

+                                const char __user *buffer, size_t count, loff_t *pos)

+{


+    char temp[5] = {0};

+

+    //count includes one trailing terminator; only 0-1 input is supported

+    if (count != 2)

+        return -EINVAL;

+

+    if (copy_from_user(temp, buffer, 1))

+        return -EFAULT;

+

+    if (temp[0] < '0' || temp[0] > '1')

+        return -EINVAL;

+

+

+

+    return count;

+}

+

+

+static ssize_t dev_reset_set(struct file *file,

+		const char __user *buffer, size_t count, loff_t *pos)

+{

+	struct net_device *dev = NULL;

+	size_t size;

+	char dev_name[MAX_NET_DEVICE_NAME_LEN + 1] = {0};

+

+	//count includes one trailing terminator

+	size = min_t(size_t, count - 1, MAX_NET_DEVICE_NAME_LEN);

+	if (copy_from_user(dev_name, buffer, size))

+		return -EFAULT;

+

+	//ɾ³ý´ËÍøÂçÉ豸Ïà¹Østat

+	dev = dev_get_by_name(&init_net, dev_name);

+	if (dev){

+		memset(&dev->stats, 0, sizeof(struct net_device_stats));

+		atomic_long_set(&dev->rx_dropped, 0);

+		dev_put(dev);

+	}else

+		printk("dev_reset_set %s not find\n", dev_name);

+	return count;

+}
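+/* Illustrative use (interface name is an example):
+ *   echo eth0 > /proc/net/dev_reset_stats   # zero eth0's stats and rx_dropped
+ */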

+

+

+static void *ct_iptables_syn_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+	if (*pos >= 1)

+		return NULL;

+	return SEQ_START_TOKEN; /* any non-NULL token; ->show ignores v */

+}

+

+static void *ct_iptables_syn_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+	(*pos)++;

+	return NULL;

+}

+

+static void ct_iptables_syn_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+	return;

+}

+

+static int ct_iptables_syn_seq_show(struct seq_file *s, void *v)

+{

+	seq_printf(s, "ct_iptables_syn_sw: %u\n", ct_iptables_syn_sw);

+	return 0;

+

+}

+

+static const struct seq_operations ct_iptables_syn_seq_ops= {

+	.start = ct_iptables_syn_seq_start,

+	.next  = ct_iptables_syn_seq_next,

+	.stop  = ct_iptables_syn_seq_stop,

+	.show  = ct_iptables_syn_seq_show,

+};

+

+static int ct_iptables_syn_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &ct_iptables_syn_seq_ops);

+}

+

+static ssize_t ct_iptables_syn_set(struct file *file,

+                                const char __user *buffer, size_t count, loff_t *pos)

+{


+    char temp[5] = {0};

+

+    //count includes one trailing terminator; only 0-1 input is supported

+    if (count != 2)

+        return -EINVAL;

+

+    if (copy_from_user(temp, buffer, 1))

+        return -EFAULT;

+

+    if (temp[0] < '0' || temp[0] > '1')

+        return -EINVAL;

+

+	ct_iptables_syn_sw = (unsigned int)(temp[0] - '0');

+

+    return count;

+}
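+/* Illustrative 0/1 toggle (assuming /proc/net/ct_iptables_syn as registered
+ * below):
+ *   echo 1 > /proc/net/ct_iptables_syn
+ *   cat /proc/net/ct_iptables_syn   # -> "ct_iptables_syn_sw: 1"
+ */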

+

+static const struct proc_ops fastnat_level_file_ops = {

+    .proc_open    = fastnat_level_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release, //seq_release_private seems to work as well, but single_release and seq_release_net hit NULL-pointer oopses

+    .proc_write = fastnat_level_set,

+};

+

+static const struct proc_ops fast_switch_file_ops = {

+    .proc_open    = fast_switch_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release, //seq_release_private seems to work as well, but single_release and seq_release_net hit NULL-pointer oopses

+    .proc_write = fast_switch_set,

+};

+

+static const struct proc_ops fastbr_level_file_ops = {

+    .proc_open    = fastbr_level_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = fastbr_level_set,

+};

+

+static const struct proc_ops fastnat_file_ops = {

+    .proc_open    = fastnat_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+};

+

+static const struct proc_ops fast6_file_ops = {

+    .proc_open    = fast6_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+};

+

+static const struct proc_ops dev_down_file_ops = {

+    .proc_write = dev_down_set,

+};

+

+static const struct proc_ops nofast_port_file_ops = {

+    .proc_open    = nofast_port_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = nofast_port_set,

+};

+

+static const struct proc_ops conn_datainfo_file_ops = {

+    .proc_open = conn_datainfo_open,

+    .proc_read = seq_read,

+    .proc_lseek = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = conn_datainfo_set

+};

+

+static const struct proc_ops pkt_lostinfo_file_ops = {

+    .proc_open = pkt_lostinfo_open,

+    .proc_read = seq_read,

+    .proc_lseek = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = pkt_lostinfo_set,

+};

+

+static const struct proc_ops dev_reset_file_ops = {

+	.proc_write = dev_reset_set,

+};

+

+static const struct proc_ops ct_iptables_syn_file_ops = {

+    .proc_open = ct_iptables_syn_open,

+    .proc_read = seq_read,

+    .proc_lseek = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = ct_iptables_syn_set,

+};

+

+

+//Initialize the proc files for fast forwarding

+int fast_conntrack_init_proc(void)

+{

+    //master switch for L3 fast forwarding

+    proc_create("fastnat_level", 0440, init_net.proc_net, &fastnat_level_file_ops);

+

+    //new/old L3 fast-forwarding switches, plus per-type switches

+    proc_create("fast_switch", 0440, init_net.proc_net, &fast_switch_file_ops);

+

+    //L2 fast-forwarding level, 0-1

+    proc_create("fastbr_level", 0440, init_net.proc_net, &fastbr_level_file_ops);

+

+    //ipv4 fast-forwarding statistics

+    proc_create("fastnat", 0440, init_net.proc_net, &fastnat_file_ops);

+

+    //ipv6 fast-forwarding statistics

+    proc_create("fast6", 0440, init_net.proc_net, &fast6_file_ops);

+

+    //some devices cannot be brought down, yet their fast links must be removed once deactivated

+    proc_create("dev_down", 0440, init_net.proc_net, &dev_down_file_ops);

+

+    //protocol ports excluded from fastnat; dynamically configurable

+    proc_create("nofast_port", 0440, init_net.proc_net, &nofast_port_file_ops);

+

+    //read link info

+    proc_create("conn_datainfo", 0440, init_net.proc_net, &conn_datainfo_file_ops);

+

+    //read link packet-loss info

+    //proc_create("pkt_lostinfo", 0440, init_net.proc_net, &pkt_lostinfo_file_ops);

+

+    //reset dev stats

+    proc_create("dev_reset_stats", 0440, init_net.proc_net, &dev_reset_file_ops);

+

+	proc_create("ct_iptables_syn", 0440, init_net.proc_net, &ct_iptables_syn_file_ops);

+    return 1;

+}

+

+EXPORT_SYMBOL(fast_conntrack_init_proc);

+

+

diff --git a/upstream/linux-5.10/net/core/fastproc/fastnat.c b/upstream/linux-5.10/net/core/fastproc/fastnat.c
new file mode 100755
index 0000000..717454c
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fastnat.c
@@ -0,0 +1,687 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/timer.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fastnat.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/print_sun.h>
+#include <net/SI/net_track.h>
+#include <linux/netfilter.h>
+#include <net/SI/fast_common.h>
+
+MODULE_LICENSE("GPL");
+
+/* ***************** ipv4 fast-forwarding variables ************************* */
+spinlock_t fastnat_spinlock;          //spinlock protecting list operations
+fast_list_t working_list = {0};
+struct hlist_nulls_head *working_hash;
+
+/* **************************** function declarations ************************ */
+
+/* forward declaration for the POST_ROUTING hook registered below
+ * (may also be provided by net/SI/fastnat.h) */
+unsigned int napt_handle(void *priv, struct sk_buff *skb,
+                         const struct nf_hook_state *state);
+
+/* **************************** function implementations ************************ */
+
+/*Extract the packet's 5-tuple*/
+static inline int fast_nat_get_tuple(struct sk_buff *skb, struct nf_conntrack_tuple *tuple)
+{
+    struct iphdr  *iph;
+    struct udphdr *udph;
+    struct tcphdr *tcph;
+#if 0
+    if (!skb || !tuple)
+    {
+        return -1;
+    }
+
+    /* only IP packets */
+    if (htons(ETH_P_IP) != skb->protocol)
+    {
+        return -1;
+    }
+#endif
+    iph = (struct iphdr *)skb->data;
+#if 0
+    /* not deal with fragment packets now */
+    if (ntohs(iph->frag_off) & (IP_MF | IP_OFFSET))
+    {
+        skbinfo_add(NULL,SKB_FRAG);
+        return -1;
+    }
+
+    if (iph->ttl <= 1)
+    {
+        return -1;
+    }
+#endif
+    memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+    /* only tcp/udp */
+    if (IPPROTO_UDP == iph->protocol)
+    {
+        udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+        tuple->src.u.udp.port = udph->source;
+        tuple->dst.u.udp.port = udph->dest;
+        skb_udpnum++;
+    }
+    else if (IPPROTO_TCP == iph->protocol)
+    {
+        tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+        tuple->src.u.tcp.port = tcph->source;
+        tuple->dst.u.tcp.port = tcph->dest;
+        skb_tcpnum++;
+    }
+    else
+    {
+        return -1;
+    }
+
+    tuple->src.l3num = AF_INET;
+    tuple->src.u3.ip = iph->saddr;
+    tuple->dst.u3.ip = iph->daddr;
+    tuple->dst.protonum = iph->protocol;
+    tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+    return 0;
+}
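+/* Example (values assumed): a TCP segment 192.168.1.2:5000 -> 8.8.8.8:53
+ * yields tuple = { src.l3num=AF_INET, src.u3.ip=192.168.1.2,
+ * src.u.tcp.port=htons(5000), dst.u3.ip=8.8.8.8, dst.u.tcp.port=htons(53),
+ * dst.protonum=IPPROTO_TCP, dst.dir=IP_CT_DIR_ORIGINAL }; addresses and
+ * ports stay in network byte order exactly as read from the headers. */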
+
+//Must stay consistent with dev_xmit_complete
+//As an inline this cannot be hoisted into a shared file, so fastnat and fast6 each carry a copy
+static inline bool start_xmit_complete(int rc)
+{
+    /*
+     * Positive cases with an skb consumed by a driver:
+     * - successful transmission (rc == NETDEV_TX_OK)
+     * - error while transmitting (rc < 0)
+     * - error while queueing to a different device (rc & NET_XMIT_MASK)
+     */
+    if (likely(rc < NET_XMIT_MASK))
+        return true;
+
+    return false;
+}
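+/* Concrete reading of the check above: NETDEV_TX_OK (0), NET_XMIT_DROP (1)
+ * and NET_XMIT_CN (2) are all below NET_XMIT_MASK (0xf), so the driver is
+ * considered to have consumed the skb; NETDEV_TX_BUSY (0x10) is not, and
+ * ownership stays with the caller. */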
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern int *vir_addr_ddrnet;
+#endif
+
+//Fast-path handling of ipv4 packets; the hash is protected by RCU, the global connection list by a spinlock
+int fast_nat_recv(struct sk_buff *skb)
+{
+    struct nf_conntrack_tuple tuple;
+    fast_entry_data_t *nat_entry_data = NULL;
+    fast_entry_t *nat_entry = NULL;
+    struct iphdr *iph = NULL;
+    struct udphdr *udph = NULL;
+    struct tcphdr *tcph = NULL;
+    __sum16 *cksum = NULL;
+    __be32 *oldip = NULL;
+    __be16 *oldport = NULL;
+    struct net_device *dev = NULL;
+    //u_int32_t skip_nat = 0;
+    struct sk_buff *skb2 = NULL;
+
+    iph = (struct iphdr *)skb->data;
+    //if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+        //goto err_out;
+
+    if (fast_nat_get_tuple(skb, &tuple) < 0)
+    {
+        print_sun(SUN_DBG, "fast_nat_get_tuple  ERR  !!!\n");
+        goto err_out;
+    }
+
+    rcu_read_lock();
+    nat_entry_data = fast_find_entry_data(working_hash, &tuple);
+    if (unlikely(nat_entry_data == NULL))
+    {
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_find  ERR  !!!\n");
+        goto err_out;
+    }
+
+    dev = nat_entry_data->outdev;
+    if (unlikely(!dev))
+    {
+        rcu_read_unlock();
+        goto err_out;
+    }
+
+    /*Check whether the packet length exceeds the egress device's MTU*/
+    if (unlikely(skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_recv outdev mtu ERR !!!\n");
+        goto err_out;
+    }
+
+    //looped-back packets are freed and counted directly
+    if (unlikely(skb->dev == dev))
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+        rcu_read_unlock();
+        kfree_skb(skb);
+        return 1;
+    }
+
+    nat_entry = fast_data_to_entry(nat_entry_data);
+    if (unlikely(!nat_entry))
+    {
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_recv fast_nat_data_to_entry null !!!\n");
+        goto err_out;
+    }
+
+    /* Only take the FASTNAT path once fast links are established in both directions; otherwise use the standard path */
+    if ((nat_entry->flags != FAST_ALL_DIR) && (IPPROTO_UDP != iph->protocol))
+    {
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_recv flags is not FAST_ALL_DIR !!!\n");
+        goto err_out;
+    }
+
+    if (unlikely(!(skb2 = fast_expand_headroom(skb, dev)))) {
+        rcu_read_unlock();
+        return 1;
+    }
+
+    if (unlikely(skb2 != skb))
+    {
+        iph = (struct iphdr *)skb2->data;
+        skb = skb2;
+    }
+
+    fast_tcpdump(skb);
+
+    //If a packet capture matched, the skb was cloned; since the fast path must modify the data, make a private copy first
+    if (skb_cloned(skb))
+    {
+        print_sun(SUN_DBG, "fast_nat_recv clone \n");
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            rcu_read_unlock();
+            print_sun(SUN_DBG, "fast_nat_recv clone copy failed !!!\n");
+            kfree_skb(skb);
+            return 1;
+        }
+    }
+
+
+    //Only once fastnat succeeds may the IP-header start address be set, for use when the cache is refreshed
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+	if (likely(skb_get_nfct(skb) == 0)){
+		skb_set_nfct(skb, (unsigned long)nat_entry->ct);
+		nf_conntrack_get(&nat_entry->ct->ct_general);
+	}
+    //Without NAT the source/destination address and ports need no change; pass the packet straight through
+    //if (nat_entry_data->is_not_nat)
+        //skip_nat = 1;
+    if (!nat_entry_data->is_not_nat)//(!skip_nat)
+    {
+        /*Perform the NAT translation*/
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+            cksum = &tcph->check;
+            oldport = (FN_TYPE_SRC == nat_entry_data->type)? (&tcph->source): (&tcph->dest);
+        }
+        else if (IPPROTO_UDP == iph->protocol)
+        {
+            udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+            cksum = &udph->check;
+            oldport = (FN_TYPE_SRC == nat_entry_data->type)? (&udph->source): (&udph->dest);
+        }
+
+        oldip = (FN_TYPE_SRC == nat_entry_data->type)? (&iph->saddr) : (&iph->daddr);
+
+        if (cksum != NULL && (0!=*cksum || IPPROTO_TCP == iph->protocol))
+        {
+            inet_proto_csum_replace4(cksum, skb, *oldip, nat_entry_data->nat_addr, 0);
+            inet_proto_csum_replace2(cksum, skb, *oldport, nat_entry_data->nat_port, 0);
+        }
+        csum_replace4(&iph->check, *oldip, nat_entry_data->nat_addr);
+        if(oldport)
+            *oldport = nat_entry_data->nat_port;
+        *oldip = nat_entry_data->nat_addr;
+    }
+    else
+    {
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+        }
+    }
+
+    skb->priority = nat_entry_data->priority;
+    skb->mark = nat_entry_data->mark;
+
+    //Per-ct traffic accounting --- counts IP packets, not MAC frames
+    if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL){
+        nat_entry->ct->packet_info[IP_CT_DIR_ORIGINAL].packets++;
+        nat_entry->ct->packet_info[IP_CT_DIR_ORIGINAL].bytes += skb->len;
+		if(unlikely(nat_entry->ct->indev[IP_CT_DIR_ORIGINAL] == NULL))
+			nat_entry->ct->indev[IP_CT_DIR_ORIGINAL] = skb->indev;
+		if(unlikely(nat_entry->ct->outdev[IP_CT_DIR_ORIGINAL] == NULL))
+			nat_entry->ct->outdev[IP_CT_DIR_ORIGINAL] = dev;
+    } else if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_REPLY){
+        nat_entry->ct->packet_info[IP_CT_DIR_REPLY].packets++;
+        nat_entry->ct->packet_info[IP_CT_DIR_REPLY].bytes += skb->len;
+		if(unlikely(nat_entry->ct->indev[IP_CT_DIR_REPLY] == NULL))
+			nat_entry->ct->indev[IP_CT_DIR_REPLY] = skb->indev;
+		if(unlikely(nat_entry->ct->outdev[IP_CT_DIR_REPLY] == NULL))
+			nat_entry->ct->outdev[IP_CT_DIR_REPLY] = dev;
+    } else {
+        printk("fastnat packet error\n");
+    }
+
+    //The kernel's built-in per-connection accounting
+    struct nf_conn_counter *acct = (struct nf_conn_counter *)nf_conn_acct_find((const struct nf_conn *)nat_entry->ct);
+    if (acct) {
+        enum ip_conntrack_info ctinfo;
+        if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL)
+            ctinfo = IP_CT_ESTABLISHED;
+        else
+            ctinfo = IP_CT_ESTABLISHED_REPLY;
+
+        atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+        atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
+    }
+
+    /* Custom feature: when flooding one-way UDP, indev is otherwise unknown for traffic accounting, so record it here */
+    if (unlikely(nat_entry_data->indev == NULL))
+    {
+        nat_entry_data->indev = skb->dev;
+    }
+
+    // Count packets received on the ingress device --- as in native Linux drivers, the lengths counted are IP packet lengths
+    if (likely(fastnat_level == FAST_NET_DEVICE))
+    {
+        nat_entry_data->indev->stats.rx_packets++;
+        nat_entry_data->indev->stats.rx_bytes += skb->len;
+    }
+
+    skb->dev = dev;
+
+    //Only fill in the MAC header when one was pre-recorded; otherwise the data starts at the IP header
+    skb_push(skb, ETH_HLEN);
+    if (likely(nat_entry_data->hh_flag))
+    {
+        memcpy(skb->data, nat_entry_data->hh_data, ETH_HLEN);
+    }
+
+    /*Update the connection timeout*/
+    if (IPPROTO_TCP == iph->protocol)
+    {
+        mod_timer(&nat_entry->timeout, jiffies + tcp_timeouts[nat_entry->ct->proto.tcp.state]);
+        update_tcp_timeout(nat_entry, nat_entry_data, tcph);
+        nat_entry->ct->timeout = jiffies + tcp_timeouts[nat_entry->ct->proto.tcp.state];
+
+        if(ackfilter(skb, nat_entry, &working_list) == 1)
+        {
+            rcu_read_unlock();
+            //spin_unlock_bh(&fastnat_spinlock);
+            return 1;
+        }
+    }
+    else if (IPPROTO_UDP == iph->protocol)
+    {
+        /*udp*/
+        if (test_bit(IPS_SEEN_REPLY_BIT, &nat_entry->ct->status))
+        {
+            mod_timer(&nat_entry->timeout, jiffies + fast_udp_timeout_stream);
+            nat_entry->ct->timeout = jiffies + fast_udp_timeout_stream;
+        }
+        else
+        {
+            mod_timer(&nat_entry->timeout, jiffies + fast_udp_timeout);
+            nat_entry->ct->timeout = jiffies + fast_udp_timeout;
+        }
+    }
+
+    if (likely(skb->dev->flags & IFF_UP))
+    {
+        //ppp only needs the IP packet
+		if (unlikely(skb->dev->type == ARPHRD_PPP))//(strncmp(skb->dev->name, ppp_name, strlen(ppp_name)) == 0)
+        {
+            skb_pull(skb, ETH_HLEN);
+        }
+
+        skb->now_location |= FASTNAT_SUCC;
+        if (likely(fastnat_level == FAST_NET_DEVICE))
+        {
+            //print_sun(SUN_DBG, "fastnat-2 dev_queue_xmit, send to:%s iph->id=0x%02x!!!!!!!! \n", skb->dev->name, iph->id);
+			if (skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev) >= NET_XMIT_MASK) {
+				skb->dev->stats.tx_dropped++;
+				kfree_skb(skb);
+			}
+        }
+        //Special applications such as traffic control must take the standard fastnat path, or their plugins cannot run
+        else if (fastnat_level == FAST_NET_CORE)
+        {
+            //print_sun(SUN_DBG, "fastnat ok-1, send to:%s !!!!!!!! \n", skb->dev->name);
+
+            dev_queue_xmit(skb);
+        }
+        else
+            print_sun(SUN_DBG,"fastnat_level:%d is not supported !!!!!!!! \n", fastnat_level);
+
+        nat_entry_data->packet_num++;
+    }
+    else
+    {
+        print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+    //print_sun(SUN_DBG, "skb : 0x%x, fastnat succ--------", skb);
+    //nf_ct_dump_tuple(&tuple);
+    rcu_read_unlock();
+
+    return 1;
+
+err_out :
+    print_sun(SUN_DBG, "skb : 0x%x, fastnat FAIL!!!!!!!!!!", skb);
+    return 0; /* not fast nat */
+}
+
+static struct nf_hook_ops nat_hook = {
+    .hook = napt_handle,
+//    .owner = THIS_MODULE,
+    .pf = PF_INET,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP_PRI_LAST,
+};
+
+//Runs at the POST_ROUTING hook; populates the fast link and inserts it into the hash table
+unsigned int napt_handle(void *priv,
+                         struct sk_buff *skb,
+                         const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    fast_entry_t *nat_entry;
+    fast_entry_data_t *entry_data;
+    enum ip_conntrack_dir dir, rdir;
+    struct dst_entry *dst = skb_dst(skb);
+    struct net_device *out = state->out;
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+    struct neighbour *_neighbour = NULL;
+
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+    {
+        return NF_ACCEPT;
+    }
+
+    if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+        return NF_ACCEPT;
+
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+
+    //No fast links for broadcast or multicast
+    if (ipv4_is_multicast(ip_hdr(skb)->daddr) || ipv4_is_lbcast(ip_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    //Watch whether the fast-link table fills up frequently; if it does, consider overwriting the oldest entries
+    if (working_list.count > nf_conntrack_max)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (!dst)
+    {
+        return NF_ACCEPT;
+    }
+
+    _neighbour = dst_neigh_lookup_skb(dst, skb);
+    if (!_neighbour)
+    {
+        print_sun(SUN_DBG,"napt_handle() _neighbour = null\n");
+        return NF_ACCEPT;
+    }
+
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+    {
+        if (strncmp(out->name, ppp_name, strlen(ppp_name)) != 0)
+        {
+            goto accept;
+        }
+    }
+
+    if (!(ct = nf_ct_get(skb, &ctinfo)))
+    {
+        print_sun(SUN_DBG,"napt_handle() ct = null\n");
+        goto accept;
+    }
+
+    protocol = nf_ct_protonum(ct);
+    print_sun(SUN_DBG,"napt_handle() protocol = %d\n", protocol);
+
+    if (ct->master == NULL)
+    {
+        //const struct nf_conntrack_helper *helper;
+        struct nf_conn_help *temp_help = nfct_help(ct);
+        //Connections carrying helper hooks must be left to the standard Linux kernel path, or the kernel cannot see the packets it needs
+        if(temp_help!=NULL)
+        {
+            //helper = rcu_dereference(temp_help->helper);
+            //if(!(helper->tuple.src.u.all == htons(21)&&helper->tuple.dst.protonum == IPPROTO_TCP)) {
+            goto accept;
+            //   }
+        }
+    }
+
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+
+    //Filter out protocol packets that must bypass fastnat, keyed on port number
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+    print_sun(SUN_DBG,"napt_handle() dir=%d, rdir=%d\n", dir, rdir);
+    /*Only TCP/UDP go through fastnat*/
+    if (IPPROTO_TCP == protocol)
+    {
+        /* only established */
+        /*TCP three-way handshake completed*/
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+            goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    spin_lock_bh(&fastnat_spinlock);
+    if (!(nat_entry = fast_get_entry(&working_list, ct, dir)))
+    {
+        print_sun(SUN_DBG,"napt_handle() nat_entry=%p\n", nat_entry);
+        spin_unlock_bh(&fastnat_spinlock);
+        goto accept;
+    }
+    nat_entry->fast_spinlock = &fastnat_spinlock;
+
+    //On first link setup take a ct refcount and remove the ct timer; duplicate packets during first setup must not repeat this
+    if (!(nat_entry->flags & FAST_ALL_DIR))
+    {
+        nf_conntrack_get(&ct->ct_general);
+        //del_timer(&ct->timeout);
+        ct->timeout = nat_entry->timeout.expires;
+
+    }
+
+    entry_data = &nat_entry->data[dir];
+    entry_data->tuplehash.tuple = ct->tuplehash[dir].tuple;
+    memcpy(entry_data->dmac, _neighbour->ha, ETH_ALEN);
+    entry_data->priority = skb->priority;
+    entry_data->mark = skb->mark;
+    entry_data->outdev = out;
+
+    /*Record the MAC header into entry_data->hh_data*/
+    if (!record_MAC_header(working_hash, ct, nat_entry, entry_data, _neighbour, out, htons(ETH_P_IP)))
+    {
+        spin_unlock_bh(&fastnat_spinlock);
+        goto accept;
+    }
+    print_sun(SUN_DBG,"napt_handle() ct->status=0x%x\n", ct->status);
+    /*Fetch the IP and port information needed for NAT translation*/
+    if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
+    {
+        if(IP_CT_DIR_ORIGINAL == dir)
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            entry_data->type = FN_TYPE_SRC;
+        }
+        else
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            entry_data->type = FN_TYPE_DST;
+        }
+    }
+    else if (test_bit(IPS_DST_NAT_BIT, &ct->status))
+    {
+        if (IP_CT_DIR_ORIGINAL == dir)
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            entry_data->type = FN_TYPE_DST;
+        }
+        else
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            entry_data->type = FN_TYPE_SRC;
+        }
+    }
+    else //scenario without NAT
+    {
+        //Here the source/destination address and ports need no change; pass straight through
+        entry_data->is_not_nat = 1;
+    }
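+    /* Illustrative SNAT case (addresses assumed): LAN 192.168.1.2:5000 ->
+     * 8.8.8.8:53 masqueraded as 10.0.0.1:5000.  For ORIGINAL-direction
+     * packets the replacement 10.0.0.1:5000 is read from the reply tuple's
+     * dst and patched over the source; for REPLY-direction packets
+     * 192.168.1.2:5000 is read from the original tuple's src and patched
+     * over the destination. */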
+
+    //Ensure the forward and reverse direction flag bits do not clash
+    nat_entry->flags = nat_entry->flags | (1 << dir);
+
+    //Add the hash node
+    fast_add_entry(working_hash, entry_data);
+    if (nat_entry->flags == FAST_ALL_DIR)
+    {
+        nat_entry->data[0].indev = nat_entry->data[1].outdev;
+        nat_entry->data[1].indev = nat_entry->data[0].outdev;
+    }
+
+    spin_lock_bh(&fast_fw_spinlock);
+    ct->fast_ct.isFast = FAST_CT_WND4;
+    spin_unlock_bh(&fast_fw_spinlock);
+
+    spin_unlock_bh(&fastnat_spinlock);
+
+accept:
+    neigh_release(_neighbour);
+    return NF_ACCEPT;
+}
+
+/*Handle notifier-chain events*/
+int fastnat_event(traverse_command_t *cmd)
+{
+    spin_lock_bh(&fastnat_spinlock);
+    traverse_process(&working_list, cmd);
+    spin_unlock_bh(&fastnat_spinlock);
+    return 0;
+}
+
+//When fastnat_level is switched off, flush all ipv4 fast-forwarding state
+void fastnat_cleanup_links(void)
+{
+    spin_lock_bh(&fastnat_spinlock);
+    fast_cleanup_links(&working_list);
+    spin_unlock_bh(&fastnat_spinlock);
+}
+
+/*fastnat initialization*/
+int tsp_fastnat_init(void)
+{
+    int ret;
+
+    print_sun(SUN_DBG,"start init fastnat\n");
+
+    working_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, /*&fastnat_hash_vmalloc,*/ 1);
+    if (!working_hash)
+    {
+        print_sun(SUN_ERR, "Unable to create working_hash\n");
+        return -EINVAL;
+    }
+
+    spin_lock_init(&fastnat_spinlock);
+
+    ret = nf_register_net_hook(&init_net, &nat_hook);
+    if (ret != 0)
+    {
+        print_sun(SUN_ERR,"init fastnat failed\n");
+        goto err;
+    }
+    print_sun(SUN_DBG,"init fastnat done\n");
+
+    return 0;
+
+err:
+    nf_ct_free_hashtable(working_hash, /*fastnat_hash_vmalloc, */nf_conntrack_htable_size);
+    return -EINVAL;
+}
+
+int tsp_fastnat_cleanup(void)
+{
+    nf_unregister_net_hook(&init_net, &nat_hook);
+    nf_ct_free_hashtable(working_hash, /*fastnat_hash_vmalloc,*/ nf_conntrack_htable_size);
+
+    print_sun(SUN_DBG,"fastnat cleanup done\n");
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c b/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c
new file mode 100755
index 0000000..e92413e
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c
@@ -0,0 +1,2837 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Connection state tracking for netfilter.  This is separated from,
+   but required by, the NAT layer; it can also be used by an iptables
+   extension. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/siphash.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+#include <linux/mm.h>
+#include <linux/nsproxy.h>
+#include <linux/rculist_nulls.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netns/hash.h>
+#include <net/ip.h>
+
+#include "nf_internals.h"
+
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/fast_common.h>
+#endif
+
+__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+EXPORT_SYMBOL_GPL(nf_conntrack_locks);
+
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
+
+struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
+struct conntrack_gc_work {
+	struct delayed_work	dwork;
+	u32			next_bucket;
+	bool			exiting;
+	bool			early_drop;
+};
+
+static __read_mostly struct kmem_cache *nf_conntrack_cachep;
+static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
+static __read_mostly bool nf_conntrack_locks_all;
+
+/* serialize hash resizes and nf_ct_iterate_cleanup */
+static DEFINE_MUTEX(nf_conntrack_mutex);
+
+#define GC_SCAN_INTERVAL	(120u * HZ)
+#define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)
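+
+/* i.e. a full-table rescan is scheduled roughly every two minutes, and each
+ * gc_worker() invocation spends at most ~10ms before rescheduling itself. */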
+
+static struct conntrack_gc_work conntrack_gc_work;
+
+void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+{
+	/* 1) Acquire the lock */
+	spin_lock(lock);
+
+	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
+	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
+	 */
+	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+		return;
+
+	/* fast path failed, unlock */
+	spin_unlock(lock);
+
+	/* Slow path 1) get global lock */
+	spin_lock(&nf_conntrack_locks_all_lock);
+
+	/* Slow path 2) get the lock we want */
+	spin_lock(lock);
+
+	/* Slow path 3) release the global lock */
+	spin_unlock(&nf_conntrack_locks_all_lock);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
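+
+/* Usage sketch (illustrative, not introduced by this patch): per-bucket
+ * paths hash into one of the CONNTRACK_LOCKS spinlocks, exactly as
+ * nf_conntrack_double_lock() below does:
+ *
+ *	nf_conntrack_lock(&nf_conntrack_locks[hash % CONNTRACK_LOCKS]);
+ *	... mutate that bucket ...
+ *	spin_unlock(&nf_conntrack_locks[hash % CONNTRACK_LOCKS]);
+ *
+ * nf_conntrack_all_lock()/nf_conntrack_all_unlock() below serialize against
+ * all buckets at once for resize-style operations.
+ */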
+
+static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
+{
+	h1 %= CONNTRACK_LOCKS;
+	h2 %= CONNTRACK_LOCKS;
+	spin_unlock(&nf_conntrack_locks[h1]);
+	if (h1 != h2)
+		spin_unlock(&nf_conntrack_locks[h2]);
+}
+
+/* return true if we need to recompute hashes (in case hash table was resized) */
+static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
+				     unsigned int h2, unsigned int sequence)
+{
+	h1 %= CONNTRACK_LOCKS;
+	h2 %= CONNTRACK_LOCKS;
+	if (h1 <= h2) {
+		nf_conntrack_lock(&nf_conntrack_locks[h1]);
+		if (h1 != h2)
+			spin_lock_nested(&nf_conntrack_locks[h2],
+					 SINGLE_DEPTH_NESTING);
+	} else {
+		nf_conntrack_lock(&nf_conntrack_locks[h2]);
+		spin_lock_nested(&nf_conntrack_locks[h1],
+				 SINGLE_DEPTH_NESTING);
+	}
+	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
+		nf_conntrack_double_unlock(h1, h2);
+		return true;
+	}
+	return false;
+}
+
+static void nf_conntrack_all_lock(void)
+	__acquires(&nf_conntrack_locks_all_lock)
+{
+	int i;
+
+	spin_lock(&nf_conntrack_locks_all_lock);
+
+	nf_conntrack_locks_all = true;
+
+	for (i = 0; i < CONNTRACK_LOCKS; i++) {
+		spin_lock(&nf_conntrack_locks[i]);
+
+		/* This spin_unlock provides the "release" to ensure that
+		 * nf_conntrack_locks_all==true is visible to everyone that
+		 * acquired spin_lock(&nf_conntrack_locks[]).
+		 */
+		spin_unlock(&nf_conntrack_locks[i]);
+	}
+}
+
+static void nf_conntrack_all_unlock(void)
+	__releases(&nf_conntrack_locks_all_lock)
+{
+	/* All prior stores must be complete before we clear
+	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+	 * might observe the false value but not the entire
+	 * critical section.
+	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
+	 */
+	smp_store_release(&nf_conntrack_locks_all, false);
+	spin_unlock(&nf_conntrack_locks_all_lock);
+}
+
+unsigned int nf_conntrack_htable_size __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
+
+unsigned int nf_conntrack_max __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_max);
+seqcount_spinlock_t nf_conntrack_generation __read_mostly;
+static unsigned int nf_conntrack_hash_rnd __read_mostly;
+
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+			      const struct net *net)
+{
+	unsigned int n;
+	u32 seed;
+
+	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
+
+	/* The direction must be ignored, so we hash everything up to the
+	 * destination ports (which is a multiple of 4) and treat the last
+	 * three bytes manually.
+	 */
+	seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
+	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
+	return jhash2((u32 *)tuple, n, seed ^
+		      (((__force __u16)tuple->dst.u.all << 16) |
+		      tuple->dst.protonum));
+}
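+
+/* The "last three bytes" above (2-byte dst port plus 1-byte protonum) are
+ * folded into the jhash seed instead of being hashed as 32-bit words. */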
+
+#ifdef CONFIG_FASTNAT_MODULE
+u32 hash_conntrack_raw_fast(const struct nf_conntrack_tuple *tuple,
+			      const struct net *net)
+{
+	return hash_conntrack_raw(tuple, net);
+}
+#endif
+
+static u32 scale_hash(u32 hash)
+{
+	return reciprocal_scale(hash, nf_conntrack_htable_size);
+}
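+
+/* reciprocal_scale(hash, size) maps a full 32-bit hash into [0, size) as
+ * (u32)(((u64)hash * size) >> 32), avoiding a modulo on the lookup path. */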
+
+static u32 __hash_conntrack(const struct net *net,
+			    const struct nf_conntrack_tuple *tuple,
+			    unsigned int size)
+{
+	return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
+}
+
+static u32 hash_conntrack(const struct net *net,
+			  const struct nf_conntrack_tuple *tuple)
+{
+	return scale_hash(hash_conntrack_raw(tuple, net));
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+u_int32_t hash_conntrack_fast(const struct nf_conntrack_tuple *tuple)
+{
+	return __hash_conntrack(&init_net, tuple, 32);
+}
+#endif
+
+static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
+				  unsigned int dataoff,
+				  struct nf_conntrack_tuple *tuple)
+{
+	struct {
+		__be16 sport;
+		__be16 dport;
+	} _inet_hdr, *inet_hdr;
+
+	/* Actually only need first 4 bytes to get ports. */
+	inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
+	if (!inet_hdr)
+		return false;
+
+	tuple->src.u.udp.port = inet_hdr->sport;
+	tuple->dst.u.udp.port = inet_hdr->dport;
+	return true;
+}
+
+static bool
+nf_ct_get_tuple(const struct sk_buff *skb,
+		unsigned int nhoff,
+		unsigned int dataoff,
+		u_int16_t l3num,
+		u_int8_t protonum,
+		struct net *net,
+		struct nf_conntrack_tuple *tuple)
+{
+	unsigned int size;
+	const __be32 *ap;
+	__be32 _addrs[8];
+
+	memset(tuple, 0, sizeof(*tuple));
+
+	tuple->src.l3num = l3num;
+	switch (l3num) {
+	case NFPROTO_IPV4:
+		nhoff += offsetof(struct iphdr, saddr);
+		size = 2 * sizeof(__be32);
+		break;
+	case NFPROTO_IPV6:
+		nhoff += offsetof(struct ipv6hdr, saddr);
+		size = sizeof(_addrs);
+		break;
+	default:
+		return true;
+	}
+
+	ap = skb_header_pointer(skb, nhoff, size, _addrs);
+	if (!ap)
+		return false;
+
+	switch (l3num) {
+	case NFPROTO_IPV4:
+		tuple->src.u3.ip = ap[0];
+		tuple->dst.u3.ip = ap[1];
+		break;
+	case NFPROTO_IPV6:
+		memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
+		memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
+		break;
+	}
+
+	tuple->dst.protonum = protonum;
+	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+	switch (protonum) {
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+	case IPPROTO_ICMP:
+		return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
+#ifdef CONFIG_NF_CT_PROTO_GRE
+	case IPPROTO_GRE:
+		return gre_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+	case IPPROTO_TCP:
+	case IPPROTO_UDP: /* fallthrough */
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+	case IPPROTO_UDPLITE:
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+	case IPPROTO_SCTP:
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+	case IPPROTO_DCCP:
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+	default:
+		break;
+	}
+
+	return true;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+bool nf_ct_get_tuple_fast(const struct sk_buff *skb,
+		unsigned int nhoff,
+		unsigned int dataoff,
+		u_int16_t l3num,
+		u_int8_t protonum,
+		struct net *net,
+		struct nf_conntrack_tuple *tuple)
+{
+	return nf_ct_get_tuple(skb, nhoff, dataoff, l3num, protonum, net, tuple);
+}
+#endif
+
+static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+			    u_int8_t *protonum)
+{
+	int dataoff = -1;
+	const struct iphdr *iph;
+	struct iphdr _iph;
+
+	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+	if (!iph)
+		return -1;
+
+	/* Conntrack defragments packets, we might still see fragments
+	 * inside ICMP packets though.
+	 */
+	if (iph->frag_off & htons(IP_OFFSET))
+		return -1;
+
+	dataoff = nhoff + (iph->ihl << 2);
+	*protonum = iph->protocol;
+
+	/* Check bogus IP headers */
+	if (dataoff > skb->len) {
+		pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
+			 nhoff, iph->ihl << 2, skb->len);
+		return -1;
+	}
+	return dataoff;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+			    u8 *protonum)
+{
+	int protoff = -1;
+	unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
+	__be16 frag_off;
+	u8 nexthdr;
+
+	if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
+			  &nexthdr, sizeof(nexthdr)) != 0) {
+		pr_debug("can't get nexthdr\n");
+		return -1;
+	}
+	protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
+	/*
+	 * (protoff == skb->len) means the packet has no data, just the
+	 * IPv6 header and possibly extension headers, but it is tracked anyway
+	 */
+	if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+		pr_debug("can't find proto in pkt\n");
+		return -1;
+	}
+
+	*protonum = nexthdr;
+	return protoff;
+}
+#endif
+
+static int get_l4proto(const struct sk_buff *skb,
+		       unsigned int nhoff, u8 pf, u8 *l4num)
+{
+	switch (pf) {
+	case NFPROTO_IPV4:
+		return ipv4_get_l4proto(skb, nhoff, l4num);
+#if IS_ENABLED(CONFIG_IPV6)
+	case NFPROTO_IPV6:
+		return ipv6_get_l4proto(skb, nhoff, l4num);
+#endif
+	default:
+		*l4num = 0;
+		break;
+	}
+	return -1;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+int get_l4proto_fast(const struct sk_buff *skb,
+		       unsigned int nhoff, u8 pf, u8 *l4num)
+{
+	return get_l4proto(skb, nhoff, pf, l4num);
+}
+#endif
+
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+		       u_int16_t l3num,
+		       struct net *net, struct nf_conntrack_tuple *tuple)
+{
+	u8 protonum;
+	int protoff;
+
+	protoff = get_l4proto(skb, nhoff, l3num, &protonum);
+	if (protoff <= 0)
+		return false;
+
+	return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
+
+bool
+nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+		   const struct nf_conntrack_tuple *orig)
+{
+	memset(inverse, 0, sizeof(*inverse));
+
+	inverse->src.l3num = orig->src.l3num;
+
+	switch (orig->src.l3num) {
+	case NFPROTO_IPV4:
+		inverse->src.u3.ip = orig->dst.u3.ip;
+		inverse->dst.u3.ip = orig->src.u3.ip;
+		break;
+	case NFPROTO_IPV6:
+		inverse->src.u3.in6 = orig->dst.u3.in6;
+		inverse->dst.u3.in6 = orig->src.u3.in6;
+		break;
+	default:
+		break;
+	}
+
+	inverse->dst.dir = !orig->dst.dir;
+
+	inverse->dst.protonum = orig->dst.protonum;
+
+	switch (orig->dst.protonum) {
+	case IPPROTO_ICMP:
+		return nf_conntrack_invert_icmp_tuple(inverse, orig);
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
+#endif
+	}
+
+	inverse->src.u.all = orig->dst.u.all;
+	inverse->dst.u.all = orig->src.u.all;
+	return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
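+
+/* Worked example: orig = {10.0.0.2:5000 -> 8.8.8.8:53, udp} inverts to
+ * {8.8.8.8:53 -> 10.0.0.2:5000, udp} with dst.dir flipped; ICMP/ICMPv6
+ * tuples are delegated to the protocol-specific helpers above. */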
+
+/* Generate an almost-unique pseudo-id for a given conntrack.
+ *
+ * intentionally doesn't re-use any of the seeds used for hash
+ * table location, we assume id gets exposed to userspace.
+ *
+ * Following nf_conn items do not change throughout lifetime
+ * of the nf_conn:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+	static __read_mostly siphash_key_t ct_id_seed;
+	unsigned long a, b, c, d;
+
+	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+	a = (unsigned long)ct;
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
+				   &ct_id_seed);
+#ifdef CONFIG_64BIT
+	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
+static void
+clean_from_lists(struct nf_conn *ct)
+{
+	pr_debug("clean_from_lists(%p)\n", ct);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
+
+	/* Destroy all pending expectations */
+	nf_ct_remove_expectations(ct);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_dying_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) dying list */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->dying);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) unconfirmed list */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->unconfirmed);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* We overload first tuple to link into unconfirmed or dying list.*/
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	spin_unlock(&pcpu->lock);
+}
+
+#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
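+/* NFCT_ALIGN() rounds a length up to the next (NFCT_INFOMASK + 1)-byte
+ * boundary, keeping the low skb->_nfct bits (used to carry ctinfo) clear. */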
+
+/* Released via destroy_conntrack() */
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags)
+{
+	struct nf_conn *tmpl, *p;
+
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
+		tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
+		if (!tmpl)
+			return NULL;
+
+		p = tmpl;
+		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+		if (tmpl != p) {
+			tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
+		}
+	} else {
+		tmpl = kzalloc(sizeof(*tmpl), flags);
+		if (!tmpl)
+			return NULL;
+	}
+
+	tmpl->status = IPS_TEMPLATE;
+	write_pnet(&tmpl->ct_net, net);
+	nf_ct_zone_add(tmpl, zone);
+	atomic_set(&tmpl->ct_general.use, 0);
+
+	return tmpl;
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
+
+void nf_ct_tmpl_free(struct nf_conn *tmpl)
+{
+	nf_ct_ext_destroy(tmpl);
+
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
+		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
+	else
+		kfree(tmpl);
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
+
+static void destroy_gre_conntrack(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CT_PROTO_GRE
+	struct nf_conn *master = ct->master;
+
+	if (master)
+		nf_ct_gre_keymap_destroy(master);
+#endif
+}
+
+static void
+destroy_conntrack(struct nf_conntrack *nfct)
+{
+	struct nf_conn *ct = (struct nf_conn *)nfct;
+
+	pr_debug("destroy_conntrack(%p)\n", ct);
+	WARN_ON(atomic_read(&nfct->use) != 0);
+
+	if (unlikely(nf_ct_is_template(ct))) {
+		nf_ct_tmpl_free(ct);
+		return;
+	}
+
+	if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
+		destroy_gre_conntrack(ct);
+
+	local_bh_disable();
+	/* Expectations will have been removed in clean_from_lists,
+	 * except TFTP can create an expectation on the first packet,
+	 * before connection is in the list, so we need to clean here,
+	 * too.
+	 */
+	nf_ct_remove_expectations(ct);
+
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
+	local_bh_enable();
+
+	if (ct->master)
+		nf_ct_put(ct->master);
+
+	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
+	nf_conntrack_free(ct);
+}
+
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+	unsigned int hash, reply_hash;
+	unsigned int sequence;
+
+	nf_ct_helper_destroy(ct);
+
+	local_bh_disable();
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		hash = hash_conntrack(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	clean_from_lists(ct);
+	nf_conntrack_double_unlock(hash, reply_hash);
+
+	nf_ct_add_to_dying_list(ct);
+
+	local_bh_enable();
+}
+
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
+{
+	struct nf_conn_tstamp *tstamp;
+
+	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
+		return false;
+
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp) {
+		s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
+
+		tstamp->stop = ktime_get_real_ns();
+		if (timeout < 0)
+			tstamp->stop -= jiffies_to_nsecs(-timeout);
+	}
+
+	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
+				    portid, report) < 0) {
+		/* destroy event was not delivered. nf_ct_put will
+		 * be done by event cache worker on redelivery.
+		 */
+		nf_ct_delete_from_lists(ct);
+		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
+		return false;
+	}
+
+	nf_conntrack_ecache_work(nf_ct_net(ct));
+	nf_ct_delete_from_lists(ct);
+	nf_ct_put(ct);
+	return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+static inline bool
+nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
+		const struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_zone *zone,
+		const struct net *net)
+{
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+	/* A conntrack can be recreated with the equal tuple,
+	 * so we need to check that the conntrack is confirmed
+	 */
+	return nf_ct_tuple_equal(tuple, &h->tuple) &&
+	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+	       nf_ct_is_confirmed(ct) &&
+	       net_eq(net, nf_ct_net(ct));
+}
+
+static inline bool
+nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
+{
+	return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+	       nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
+				 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
+	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
+	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
+	       net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
+}
+
+/* caller must hold rcu readlock and none of the nf_conntrack_locks */
+static void nf_ct_gc_expired(struct nf_conn *ct)
+{
+	if (!atomic_inc_not_zero(&ct->ct_general.use))
+		return;
+
+	if (nf_ct_should_gc(ct))
+		nf_ct_kill(ct);
+
+	nf_ct_put(ct);
+}
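+
+/* atomic_inc_not_zero() is the SLAB_TYPESAFE_BY_RCU idiom here: a refcount
+ * of zero means the entry is already being freed or recycled, so it must be
+ * skipped rather than revived. */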
+
+/*
+ * Warning:
+ * - Caller must take a reference on returned object
+ *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
+ */
+static struct nf_conntrack_tuple_hash *
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
+		      const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
+	struct hlist_nulls_node *n;
+	unsigned int bucket, hsize;
+
+begin:
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	bucket = reciprocal_scale(hash, hsize);
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
+		struct nf_conn *ct;
+
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (nf_ct_is_expired(ct)) {
+			nf_ct_gc_expired(ct);
+			continue;
+		}
+
+		if (nf_ct_key_equal(h, tuple, zone, net))
+			return h;
+	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(n) != bucket) {
+		NF_CT_STAT_INC_ATOMIC(net, search_restart);
+		goto begin;
+	}
+
+	return NULL;
+}
+
+/* Find a connection corresponding to a tuple. */
+static struct nf_conntrack_tuple_hash *
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+			const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+
+	rcu_read_lock();
+
+	h = ____nf_conntrack_find(net, zone, tuple, hash);
+	if (h) {
+		/* We have a candidate that matches the tuple we're interested
+		 * in, try to obtain a reference and re-check tuple
+		 */
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
+			if (likely(nf_ct_key_equal(h, tuple, zone, net)))
+				goto found;
+
+			/* TYPESAFE_BY_RCU recycled the candidate */
+			nf_ct_put(ct);
+		}
+
+		h = NULL;
+	}
+found:
+	rcu_read_unlock();
+
+	return h;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+struct nf_conntrack_tuple_hash *nf_conntrack_find_fast(struct net *net, const struct nf_conntrack_zone *zone,
+			  const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	return __nf_conntrack_find_get(net, zone, tuple, hash);
+}
+#endif
+
+struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+		      const struct nf_conntrack_tuple *tuple)
+{
+	return __nf_conntrack_find_get(net, zone, tuple,
+				       hash_conntrack_raw(tuple, net));
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
+
+static void __nf_conntrack_hash_insert(struct nf_conn *ct,
+				       unsigned int hash,
+				       unsigned int reply_hash)
+{
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			   &nf_conntrack_hash[hash]);
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+			   &nf_conntrack_hash[reply_hash]);
+}
+
+int
+nf_conntrack_hash_check_insert(struct nf_conn *ct)
+{
+	const struct nf_conntrack_zone *zone;
+	struct net *net = nf_ct_net(ct);
+	unsigned int hash, reply_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	unsigned int sequence;
+
+	zone = nf_ct_zone(ct);
+
+	local_bh_disable();
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		hash = hash_conntrack(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	/* See if there's one in the list already, including reverse */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
+			goto out;
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			goto out;
+
+	smp_wmb();
+	/* The caller holds a reference to this object */
+	atomic_set(&ct->ct_general.use, 2);
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
+	nf_conntrack_double_unlock(hash, reply_hash);
+	NF_CT_STAT_INC(net, insert);
+	local_bh_enable();
+	return 0;
+
+out:
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+	return -EEXIST;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
+
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+		    unsigned int bytes)
+{
+	struct nf_conn_acct *acct;
+
+	acct = nf_conn_acct_find(ct);
+	if (acct) {
+		struct nf_conn_counter *counter = acct->counter;
+
+		atomic64_add(packets, &counter[dir].packets);
+		atomic64_add(bytes, &counter[dir].bytes);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_ct_acct_add);
+
+static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			     const struct nf_conn *loser_ct)
+{
+	struct nf_conn_acct *acct;
+
+	acct = nf_conn_acct_find(loser_ct);
+	if (acct) {
+		struct nf_conn_counter *counter = acct->counter;
+		unsigned int bytes;
+
+		/* u32 should be fine since we must have seen one packet. */
+		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
+		nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
+	}
+}
+
+static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
+{
+	struct nf_conn_tstamp *tstamp;
+
+	atomic_inc(&ct->ct_general.use);
+	ct->status |= IPS_CONFIRMED;
+
+	/* set conntrack timestamp, if enabled. */
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp)
+		tstamp->start = ktime_get_real_ns();
+}
+
+/* caller must hold locks to prevent concurrent changes */
+static int __nf_ct_resolve_clash(struct sk_buff *skb,
+				 struct nf_conntrack_tuple_hash *h)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *loser_ct;
+
+	loser_ct = nf_ct_get(skb, &ctinfo);
+
+	if (nf_ct_is_dying(ct))
+		return NF_DROP;
+
+	if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
+	    nf_ct_match(ct, loser_ct)) {
+		struct net *net = nf_ct_net(ct);
+
+		nf_conntrack_get(&ct->ct_general);
+
+		nf_ct_acct_merge(ct, ctinfo, loser_ct);
+		nf_ct_add_to_dying_list(loser_ct);
+		nf_conntrack_put(&loser_ct->ct_general);
+		nf_ct_set(skb, ct, ctinfo);
+
+		NF_CT_STAT_INC(net, clash_resolve);
+		return NF_ACCEPT;
+	}
+
+	return NF_DROP;
+}
+
+/**
+ * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
+ *
+ * @skb: skb that causes the collision
+ * @repl_idx: hash slot for reply direction
+ *
+ * Called when origin or reply direction had a clash.
+ * The skb can be handled without packet drop provided the reply direction
+ * is unique or the existing entry has the identical tuple in both
+ * directions.
+ *
+ * Caller must hold conntrack table locks to prevent concurrent updates.
+ *
+ * Returns NF_DROP if the clash could not be handled.
+ */
+static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+{
+	struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	struct net *net;
+
+	zone = nf_ct_zone(loser_ct);
+	net = nf_ct_net(loser_ct);
+
+	/* Reply direction must never result in a clash, unless both origin
+	 * and reply tuples are identical.
+	 */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
+		if (nf_ct_key_equal(h,
+				    &loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			return __nf_ct_resolve_clash(skb, h);
+	}
+
+	/* We want the clashing entry to go away real soon: 1 second timeout. */
+	WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
+
+	/* IPS_NAT_CLASH removes the entry automatically on the first
+	 * reply.  Also prevents UDP tracker from moving the entry to
+	 * ASSURED state, i.e. the entry can always be evicted under
+	 * pressure.
+	 */
+	loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
+
+	__nf_conntrack_insert_prepare(loser_ct);
+
+	/* fake add for ORIGINAL dir: we want lookups to only find the entry
+	 * already in the table.  This also hides the clashing entry from
+	 * ctnetlink iteration, i.e. conntrack -L won't show them.
+	 */
+	hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+
+	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+				 &nf_conntrack_hash[repl_idx]);
+
+	NF_CT_STAT_INC(net, clash_resolve);
+	return NF_ACCEPT;
+}
+
+/**
+ * nf_ct_resolve_clash - attempt to handle clash without packet drop
+ *
+ * @skb: skb that causes the clash
+ * @h: tuplehash of the clashing entry already in table
+ * @reply_hash: hash slot for reply direction
+ *
+ * A conntrack entry can be inserted to the connection tracking table
+ * if there is no existing entry with an identical tuple.
+ *
+ * If there is one, @skb (and the associated, unconfirmed conntrack) has
+ * to be dropped.  In case @skb is retransmitted, next conntrack lookup
+ * will find the already-existing entry.
+ *
+ * The major problem with such packet drop is the extra delay added by
+ * the packet loss -- it will take some time for a retransmit to occur
+ * (or the sender to time out when waiting for a reply).
+ *
+ * This function attempts to handle the situation without packet drop.
+ *
+ * If @skb has no NAT transformation or if the colliding entries are
+ * exactly the same, only the to-be-confirmed conntrack entry is discarded
+ * and @skb is associated with the conntrack entry already in the table.
+ *
+ * Failing that, the new, unconfirmed conntrack is still added to the table
+ * provided that the collision only occurs in the ORIGINAL direction.
+ * The new entry will be added only in the non-clashing REPLY direction,
+ * so packets in the ORIGINAL direction will continue to match the existing
+ * entry.  The new entry will also have a fixed timeout so it expires --
+ * due to the collision, it will only see reply traffic.
+ *
+ * Returns NF_DROP if the clash could not be resolved.
+ */
+static __cold noinline int
+nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
+		    u32 reply_hash)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	const struct nf_conntrack_l4proto *l4proto;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *loser_ct;
+	struct net *net;
+	int ret;
+
+	loser_ct = nf_ct_get(skb, &ctinfo);
+	net = nf_ct_net(loser_ct);
+
+	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+	if (!l4proto->allow_clash)
+		goto drop;
+
+	ret = __nf_ct_resolve_clash(skb, h);
+	if (ret == NF_ACCEPT)
+		return ret;
+
+	ret = nf_ct_resolve_clash_harder(skb, reply_hash);
+	if (ret == NF_ACCEPT)
+		return ret;
+
+drop:
+	nf_ct_add_to_dying_list(loser_ct);
+	NF_CT_STAT_INC(net, drop);
+	NF_CT_STAT_INC(net, insert_failed);
+	return NF_DROP;
+}
+
+/* Confirm a connection given skb; places it in hash table */
+int
+__nf_conntrack_confirm(struct sk_buff *skb)
+{
+	const struct nf_conntrack_zone *zone;
+	unsigned int hash, reply_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct nf_conn_help *help;
+	struct hlist_nulls_node *n;
+	enum ip_conntrack_info ctinfo;
+	struct net *net;
+	unsigned int sequence;
+	int ret = NF_DROP;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	net = nf_ct_net(ct);
+
+	/* ipt_REJECT uses nf_conntrack_attach to attach related
+	   ICMP/TCP RST packets in other direction.  Actual packet
+	   which created connection will be IP_CT_NEW or for an
+	   expected connection, IP_CT_RELATED. */
+	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+		return NF_ACCEPT;
+
+	zone = nf_ct_zone(ct);
+	local_bh_disable();
+
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		/* reuse the hash saved before */
+		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
+		hash = scale_hash(hash);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	/* We're not in hash table, and we refuse to set up related
+	 * connections for unconfirmed conns.  But packet copies and
+	 * REJECT will give spurious warnings here.
+	 */
+
+	/* Another skb with the same unconfirmed conntrack may
+	 * win the race. This may happen for bridge(br_flood)
+	 * or broadcast/multicast packets do skb_clone with
+	 * unconfirmed conntrack.
+	 */
+	if (unlikely(nf_ct_is_confirmed(ct))) {
+		WARN_ON_ONCE(1);
+		nf_conntrack_double_unlock(hash, reply_hash);
+		local_bh_enable();
+		return NF_DROP;
+	}
+
+	pr_debug("Confirming conntrack %p\n", ct);
+	/* We have to check the DYING flag after unlink to prevent
+	 * a race against nf_ct_get_next_corpse() possibly called from
+	 * user context, else we insert an already 'dead' hash, blocking
+	 * further use of that particular connection -JM.
+	 */
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
+	if (unlikely(nf_ct_is_dying(ct))) {
+		nf_ct_add_to_dying_list(ct);
+		NF_CT_STAT_INC(net, insert_failed);
+		goto dying;
+	}
+
+	/* See if there's one in the list already, including reverse:
+	   NAT could have grabbed it without realizing, since we're
+	   not in the hash.  If there is, we lost the race. */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
+			goto out;
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			goto out;
+
+	/* Timer relative to confirmation time, not original
+	   setting time, otherwise we'd get timer wrap in
+	   weird delay cases. */
+	ct->timeout += nfct_time_stamp;
+
+	__nf_conntrack_insert_prepare(ct);
+
+	/* Since the lookup is lockless, hash insertion must be done after
+	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
+	 * guarantee that no other CPU can find the conntrack before the above
+	 * stores are visible.
+	 */
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+
+	help = nfct_help(ct);
+	if (help && help->helper)
+		nf_conntrack_event_cache(IPCT_HELPER, ct);
+
+	nf_conntrack_event_cache(master_ct(ct) ?
+				 IPCT_RELATED : IPCT_NEW, ct);
+	return NF_ACCEPT;
+
+out:
+	ret = nf_ct_resolve_clash(skb, h, reply_hash);
+dying:
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
+
+/* Returns true if a connection corresponds to the tuple (required
+   for NAT). */
+int
+nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+			 const struct nf_conn *ignored_conntrack)
+{
+	struct net *net = nf_ct_net(ignored_conntrack);
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
+	unsigned int hash, hsize;
+	struct hlist_nulls_node *n;
+	struct nf_conn *ct;
+
+	zone = nf_ct_zone(ignored_conntrack);
+
+	rcu_read_lock();
+ begin:
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	hash = __hash_conntrack(net, tuple, hsize);
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+
+		if (ct == ignored_conntrack)
+			continue;
+
+		if (nf_ct_is_expired(ct)) {
+			nf_ct_gc_expired(ct);
+			continue;
+		}
+
+		if (nf_ct_key_equal(h, tuple, zone, net)) {
+			/* Tuple is taken already, so caller will need to find
+			 * a new source port to use.
+			 *
+			 * Only exception:
+			 * If the *original tuples* are identical, then both
+			 * conntracks refer to the same flow.
+			 * This is a rare situation, it can occur e.g. when
+			 * more than one UDP packet is sent from same socket
+			 * in different threads.
+			 *
+			 * Let nf_ct_resolve_clash() deal with this later.
+			 */
+			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+					      nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
+				continue;
+
+			NF_CT_STAT_INC_ATOMIC(net, found);
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+
+	if (get_nulls_value(n) != hash) {
+		NF_CT_STAT_INC_ATOMIC(net, search_restart);
+		goto begin;
+	}
+
+	rcu_read_unlock();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
+
+#define NF_CT_EVICTION_RANGE	8
+
+/* There's a small race here where we may free a just-assured
+   connection.  Too bad: we're in trouble anyway. */
+static unsigned int early_drop_list(struct net *net,
+				    struct hlist_nulls_head *head)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	unsigned int drops = 0;
+	struct nf_conn *tmp;
+
+	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
+		tmp = nf_ct_tuplehash_to_ctrack(h);
+
+		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
+			continue;
+
+		if (nf_ct_is_expired(tmp)) {
+			nf_ct_gc_expired(tmp);
+			continue;
+		}
+
+		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+		    !net_eq(nf_ct_net(tmp), net) ||
+		    nf_ct_is_dying(tmp))
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp->ct_general.use))
+			continue;
+
+		/* kill only if still in same netns -- might have moved due to
+		 * SLAB_TYPESAFE_BY_RCU rules.
+		 *
+		 * We steal the timer reference.  If that fails timer has
+		 * already fired or someone else deleted it. Just drop ref
+		 * and move to next entry.
+		 */
+		if (net_eq(nf_ct_net(tmp), net) &&
+		    nf_ct_is_confirmed(tmp) &&
+		    nf_ct_delete(tmp, 0, 0))
+			drops++;
+
+		nf_ct_put(tmp);
+	}
+
+	return drops;
+}
+
+static noinline int early_drop(struct net *net, unsigned int hash)
+{
+	unsigned int i, bucket;
+
+	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
+		struct hlist_nulls_head *ct_hash;
+		unsigned int hsize, drops;
+
+		rcu_read_lock();
+		nf_conntrack_get_ht(&ct_hash, &hsize);
+		if (!i)
+			bucket = reciprocal_scale(hash, hsize);
+		else
+			bucket = (bucket + 1) % hsize;
+
+		drops = early_drop_list(net, &ct_hash[bucket]);
+		rcu_read_unlock();
+
+		if (drops) {
+			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool gc_worker_skip_ct(const struct nf_conn *ct)
+{
+	return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
+}
+
+static bool gc_worker_can_early_drop(const struct nf_conn *ct)
+{
+	const struct nf_conntrack_l4proto *l4proto;
+
+	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
+		return true;
+
+	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+	if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
+		return true;
+
+	return false;
+}
+
+static void gc_worker(struct work_struct *work)
+{
+	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+	unsigned int i, hashsz, nf_conntrack_max95 = 0;
+	unsigned long next_run = GC_SCAN_INTERVAL;
+	struct conntrack_gc_work *gc_work;
+	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+
+	i = gc_work->next_bucket;
+	if (gc_work->early_drop)
+		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
+
+	do {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_head *ct_hash;
+		struct hlist_nulls_node *n;
+		struct nf_conn *tmp;
+
+		rcu_read_lock();
+
+		nf_conntrack_get_ht(&ct_hash, &hashsz);
+		if (i >= hashsz) {
+			rcu_read_unlock();
+			break;
+		}
+
+		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+			struct net *net;
+
+			tmp = nf_ct_tuplehash_to_ctrack(h);
+
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+				nf_ct_offload_timeout(tmp);
+				continue;
+			}
+
+			if (nf_ct_is_expired(tmp)) {
+				nf_ct_gc_expired(tmp);
+				continue;
+			}
+
+			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
+				continue;
+
+			net = nf_ct_net(tmp);
+			if (atomic_read(&net->ct.count) < nf_conntrack_max95)
+				continue;
+
+			/* need to take reference to avoid possible races */
+			if (!atomic_inc_not_zero(&tmp->ct_general.use))
+				continue;
+
+			if (gc_worker_skip_ct(tmp)) {
+				nf_ct_put(tmp);
+				continue;
+			}
+
+			if (gc_worker_can_early_drop(tmp))
+				nf_ct_kill(tmp);
+
+			nf_ct_put(tmp);
+		}
+
+		/* could check get_nulls_value() here and restart if ct
+		 * was moved to another chain.  But given gc is best-effort
+		 * we will just continue with next hash slot.
+		 */
+		rcu_read_unlock();
+		cond_resched();
+		i++;
+
+		if (time_after(jiffies, end_time) && i < hashsz) {
+			gc_work->next_bucket = i;
+			next_run = 0;
+			break;
+		}
+	} while (i < hashsz);
+
+	if (gc_work->exiting)
+		return;
+
+	/*
+	 * Eviction will normally happen from the packet path, and not
+	 * from this gc worker.
+	 *
+	 * This worker is only here to reap expired entries when system went
+	 * idle after a busy period.
+	 */
+	if (next_run) {
+		gc_work->early_drop = false;
+		gc_work->next_bucket = 0;
+	}
+	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
+}
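+
+/* A next_run of 0 requeues the worker immediately so the scan resumes from
+ * gc_work->next_bucket; otherwise the next pass starts over from bucket 0
+ * after GC_SCAN_INTERVAL. */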
+
+static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+{
+	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
+	gc_work->exiting = false;
+}
+
+static struct nf_conn *
+__nf_conntrack_alloc(struct net *net,
+		     const struct nf_conntrack_zone *zone,
+		     const struct nf_conntrack_tuple *orig,
+		     const struct nf_conntrack_tuple *repl,
+		     gfp_t gfp, u32 hash)
+{
+	struct nf_conn *ct;
+
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&net->ct.count);
+
+	if (nf_conntrack_max &&
+	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
+		if (!early_drop(net, hash)) {
+			if (!conntrack_gc_work.early_drop)
+				conntrack_gc_work.early_drop = true;
+			atomic_dec(&net->ct.count);
+			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_TYPESAFE_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+	if (ct == NULL)
+		goto out;
+
+	spin_lock_init(&ct->lock);
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
+	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	/* save hash for reusing when confirming */
+	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
+	ct->status = 0;
+	WRITE_ONCE(ct->timeout, 0);
+	write_pnet(&ct->ct_net, net);
+	memset(&ct->__nfct_init_offset, 0,
+	       offsetof(struct nf_conn, proto) -
+	       offsetof(struct nf_conn, __nfct_init_offset));
+
+	nf_ct_zone_add(ct, zone);
+
+	/* Because we use RCU lookups, we set ct_general.use to zero before
+	 * this is inserted in any list.
+	 */
+	atomic_set(&ct->ct_general.use, 0);
+	return ct;
+out:
+	atomic_dec(&net->ct.count);
+	return ERR_PTR(-ENOMEM);
+}
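+
+/* Note: the hash stashed in the REPLY hnnode.pprev above is re-read by
+ * __nf_conntrack_confirm() (via scale_hash()) so confirmation need not
+ * rehash the ORIGINAL tuple. */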
+
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
+				   const struct nf_conntrack_tuple *orig,
+				   const struct nf_conntrack_tuple *repl,
+				   gfp_t gfp)
+{
+	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
+
+void nf_conntrack_free(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+
+	/* A freed object has refcnt == 0, that's
+	 * the golden rule for SLAB_TYPESAFE_BY_RCU
+	 */
+	WARN_ON(atomic_read(&ct->ct_general.use) != 0);
+
+	nf_ct_ext_destroy(ct);
+	kmem_cache_free(nf_conntrack_cachep, ct);
+	smp_mb__before_atomic();
+	atomic_dec(&net->ct.count);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_free);
+
+/* Allocate a new conntrack: we return -ENOMEM if classification
+   failed due to stress.  Otherwise it really is unclassifiable. */
+static noinline struct nf_conntrack_tuple_hash *
+init_conntrack(struct net *net, struct nf_conn *tmpl,
+	       const struct nf_conntrack_tuple *tuple,
+	       struct sk_buff *skb,
+	       unsigned int dataoff, u32 hash)
+{
+	struct nf_conn *ct;
+	struct nf_conn_help *help;
+	struct nf_conntrack_tuple repl_tuple;
+	struct nf_conntrack_ecache *ecache;
+	struct nf_conntrack_expect *exp = NULL;
+	const struct nf_conntrack_zone *zone;
+	struct nf_conn_timeout *timeout_ext;
+	struct nf_conntrack_zone tmp;
+	int dir = 0;
+
+	if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
+		pr_debug("Can't invert tuple.\n");
+		return NULL;
+	}
+
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
+				  hash);
+	if (IS_ERR(ct))
+		return (struct nf_conntrack_tuple_hash *)ct;
+
+#ifdef CONFIG_FASTNAT_MODULE
+	RCU_INIT_POINTER(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL], NULL);
+	RCU_INIT_POINTER(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY], NULL);
+	ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL] = NULL;
+	ct->fast_ct.fast_brport[IP_CT_DIR_REPLY] = NULL;
+	ct->fast_ct.isFast = 0;	/* CT_FAST_NOT */
+	RCU_INIT_POINTER(ct->fast_ct.sk, NULL);
+#endif
+	ct->packet_info[IP_CT_DIR_ORIGINAL].packets = 0;
+	ct->packet_info[IP_CT_DIR_ORIGINAL].bytes = 0;
+	ct->packet_info[IP_CT_DIR_REPLY].packets = 0;
+	ct->packet_info[IP_CT_DIR_REPLY].bytes = 0;
+	for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
+		ct->indev[dir] = NULL;
+		ct->outdev[dir] = NULL;
+	}
+	if (!nf_ct_add_synproxy(ct, tmpl)) {
+		nf_conntrack_free(ct);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
+
+	if (timeout_ext)
+		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
+				      GFP_ATOMIC);
+
+	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
+	nf_ct_labels_ext_add(ct);
+
+	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
+	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+				 ecache ? ecache->expmask : 0,
+			     GFP_ATOMIC);
+
+	local_bh_disable();
+	if (net->ct.expect_count) {
+		spin_lock(&nf_conntrack_expect_lock);
+		exp = nf_ct_find_expectation(net, zone, tuple);
+		if (exp) {
+			pr_debug("expectation arrives ct=%p exp=%p\n",
+				 ct, exp);
+			/* Welcome, Mr. Bond.  We've been expecting you... */
+			__set_bit(IPS_EXPECTED_BIT, &ct->status);
+			/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
+			ct->master = exp->master;
+			if (exp->helper) {
+				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+				if (help)
+					rcu_assign_pointer(help->helper, exp->helper);
+			}
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+			ct->mark = exp->master->mark;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+			ct->secmark = exp->master->secmark;
+#endif
+			NF_CT_STAT_INC(net, expect_new);
+		}
+		spin_unlock(&nf_conntrack_expect_lock);
+	}
+	if (!exp)
+		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
+
+	/* Now it is inserted into the unconfirmed list, bump refcount */
+	nf_conntrack_get(&ct->ct_general);
+	nf_ct_add_to_unconfirmed_list(ct);
+
+	local_bh_enable();
+
+	if (exp) {
+		if (exp->expectfn)
+			exp->expectfn(ct, exp);
+		nf_ct_expect_put(exp);
+	}
+
+	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
+}
+
+/* On success, returns 0, sets skb->_nfct | ctinfo */
+static int
+resolve_normal_ct(struct nf_conn *tmpl,
+		  struct sk_buff *skb,
+		  unsigned int dataoff,
+		  u_int8_t protonum,
+		  const struct nf_hook_state *state)
+{
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple tuple;
+	struct nf_conntrack_tuple_hash *h;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conntrack_zone tmp;
+	struct nf_conn *ct;
+	u32 hash;
+
+	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
+			     dataoff, state->pf, protonum, state->net,
+			     &tuple)) {
+		pr_debug("Can't get tuple\n");
+		return 0;
+	}
+
+	/* look for tuple match */
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+	hash = hash_conntrack_raw(&tuple, state->net);
+	h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
+	if (!h) {
+		h = init_conntrack(state->net, tmpl, &tuple,
+				   skb, dataoff, hash);
+		if (!h)
+			return 0;
+		if (IS_ERR(h))
+			return PTR_ERR(h);
+	}
+	ct = nf_ct_tuplehash_to_ctrack(h);
+
+	/* It exists; we have (non-exclusive) reference. */
+	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+		ctinfo = IP_CT_ESTABLISHED_REPLY;
+	} else {
+		/* Once we've had two way comms, always ESTABLISHED. */
+		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+			pr_debug("normal packet for %p\n", ct);
+			ctinfo = IP_CT_ESTABLISHED;
+		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+			pr_debug("related packet for %p\n", ct);
+			ctinfo = IP_CT_RELATED;
+		} else {
+			pr_debug("new packet for %p\n", ct);
+			ctinfo = IP_CT_NEW;
+		}
+	}
+	nf_ct_set(skb, ct, ctinfo);
+	return 0;
+}
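+
+/* ctinfo recap: a REPLY-direction hit yields IP_CT_ESTABLISHED_REPLY; in the
+ * ORIGINAL direction it is IP_CT_ESTABLISHED once a reply has been seen,
+ * IP_CT_RELATED for expected connections, and IP_CT_NEW otherwise. */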
+
+/*
+ * icmp packets need special treatment to handle error messages that are
+ * related to a connection.
+ *
+ * Callers need to check if skb has a conntrack assigned when this
+ * helper returns; in such case skb belongs to an already known connection.
+ */
+static unsigned int __cold
+nf_conntrack_handle_icmp(struct nf_conn *tmpl,
+			 struct sk_buff *skb,
+			 unsigned int dataoff,
+			 u8 protonum,
+			 const struct nf_hook_state *state)
+{
+	int ret;
+
+	if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
+		ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
+#if IS_ENABLED(CONFIG_IPV6)
+	else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
+		ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
+#endif
+	else
+		return NF_ACCEPT;
+
+	if (ret <= 0)
+		NF_CT_STAT_INC_ATOMIC(state->net, error);
+
+	return ret;
+}
+
+static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
+			  enum ip_conntrack_info ctinfo)
+{
+	const unsigned int *timeout = nf_ct_timeout_lookup(ct);
+
+	if (!timeout)
+		timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
+
+	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
+	return NF_ACCEPT;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int nf_conntrack_handle_packet(struct nf_conn *ct,
+				      struct sk_buff *skb,
+				      unsigned int dataoff,
+				      enum ip_conntrack_info ctinfo,
+				      const struct nf_hook_state *state)
+{
+	switch (nf_ct_protonum(ct)) {
+	case IPPROTO_TCP:
+		return nf_conntrack_tcp_packet(ct, skb, dataoff,
+					       ctinfo, state);
+	case IPPROTO_UDP:
+		return nf_conntrack_udp_packet(ct, skb, dataoff,
+					       ctinfo, state);
+	case IPPROTO_ICMP:
+		return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+	case IPPROTO_UDPLITE:
+		return nf_conntrack_udplite_packet(ct, skb, dataoff,
+						   ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+	case IPPROTO_SCTP:
+		return nf_conntrack_sctp_packet(ct, skb, dataoff,
+						ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+	case IPPROTO_DCCP:
+		return nf_conntrack_dccp_packet(ct, skb, dataoff,
+						ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+	case IPPROTO_GRE:
+		return nf_conntrack_gre_packet(ct, skb, dataoff,
+					       ctinfo, state);
+#endif
+	}
+
+	return generic_packet(ct, skb, ctinfo);
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+int nf_conntrack_handle_packet_fast(struct nf_conn *ct,
+				      struct sk_buff *skb,
+				      unsigned int dataoff,
+				      enum ip_conntrack_info ctinfo,
+				      const struct nf_hook_state *state)
+{
+	return nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
+}
+
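+/* This FASTNAT variant appears to replace the generic inline
+ * nf_conntrack_put() (presumably compiled out under CONFIG_FASTNAT_MODULE)
+ * so that the final reference drop also releases cached fast-path dsts and
+ * the socket reference before nf_conntrack_destroy() runs. */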
+void nf_conntrack_put(struct nf_conntrack *nfct)
+{
+	if (nfct && atomic_dec_and_test(&nfct->use)) {
+		fast_conn_release((struct nf_conn *)nfct, RELEASE_ALL_DST | RELEASE_ALL_SK);
+		nf_conntrack_destroy(nfct);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_put);
+#endif
+
+unsigned int
+nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct, *tmpl;
+	u_int8_t protonum;
+	int dataoff, ret;
+
+	tmpl = nf_ct_get(skb, &ctinfo);
+	if (tmpl || ctinfo == IP_CT_UNTRACKED) {
+		/* Previously seen (loopback or untracked)?  Ignore. */
+		if ((tmpl && !nf_ct_is_template(tmpl)) ||
+		     ctinfo == IP_CT_UNTRACKED)
+			return NF_ACCEPT;
+		skb->_nfct = 0;
+	}
+
+	/* rcu_read_lock()ed by nf_hook_thresh */
+	dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
+	if (dataoff <= 0) {
+		pr_debug("not prepared to track yet or error occurred\n");
+		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+		ret = NF_ACCEPT;
+		goto out;
+	}
+
+	if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
+		ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
+					       protonum, state);
+		if (ret <= 0) {
+			ret = -ret;
+			goto out;
+		}
+		/* ICMP[v6] protocol trackers may assign one conntrack. */
+		if (skb->_nfct)
+			goto out;
+	}
+repeat:
+	ret = resolve_normal_ct(tmpl, skb, dataoff,
+				protonum, state);
+	if (ret < 0) {
+		/* Too stressed to deal. */
+		NF_CT_STAT_INC_ATOMIC(state->net, drop);
+		ret = NF_DROP;
+		goto out;
+	}
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct) {
+		/* Not valid part of a connection */
+		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+		ret = NF_ACCEPT;
+		goto out;
+	}
+
+	ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
+	if (ret <= 0) {
+		/* Invalid: inverse of the return code tells
+		 * the netfilter core what to do */
+		pr_debug("nf_conntrack_in: Can't track with proto module\n");
+		nf_conntrack_put(&ct->ct_general);
+		skb->_nfct = 0;
+		/* Special case: TCP tracker reports an attempt to reopen a
+		 * closed/aborted connection. We have to go back and create a
+		 * fresh conntrack.
+		 */
+		if (ret == -NF_REPEAT)
+			goto repeat;
+
+		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+		if (ret == -NF_DROP)
+			NF_CT_STAT_INC_ATOMIC(state->net, drop);
+
+		ret = -ret;
+		goto out;
+	}
+
+	if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
+	    !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+		nf_conntrack_event_cache(IPCT_REPLY, ct);
+out:
+	if (tmpl)
+		nf_ct_put(tmpl);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_in);
+
+/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
+   implicitly racy: see __nf_conntrack_confirm */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+			      const struct nf_conntrack_tuple *newreply)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+
+	/* Should be unconfirmed, so not in hash table yet */
+	WARN_ON(nf_ct_is_confirmed(ct));
+
+	pr_debug("Altering reply tuple of %p to ", ct);
+	nf_ct_dump_tuple(newreply);
+
+	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+	if (ct->master || (help && !hlist_empty(&help->expectations)))
+		return;
+
+	rcu_read_lock();
+	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
+
+/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
+void __nf_ct_refresh_acct(struct nf_conn *ct,
+			  enum ip_conntrack_info ctinfo,
+			  const struct sk_buff *skb,
+			  u32 extra_jiffies,
+			  bool do_acct)
+{
+	/* Only update if this is not a fixed timeout */
+	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
+		goto acct;
+
+	/* If not in hash table, timer will not be active yet */
+	if (nf_ct_is_confirmed(ct))
+		extra_jiffies += nfct_time_stamp;
+
+	if (READ_ONCE(ct->timeout) != extra_jiffies)
+		WRITE_ONCE(ct->timeout, extra_jiffies);
+acct:
+	if (do_acct)
+		nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+}
+EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
+
+bool nf_ct_kill_acct(struct nf_conn *ct,
+		     enum ip_conntrack_info ctinfo,
+		     const struct sk_buff *skb)
+{
+	nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+
+	return nf_ct_delete(ct, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
+
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/mutex.h>
+
+/* Generic function for tcp/udp/sctp/dccp and alike. */
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+			       const struct nf_conntrack_tuple *tuple)
+{
+	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
+	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
+
+const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
+	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
+	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
+};
+EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
+
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+			       struct nf_conntrack_tuple *t,
+			       u_int32_t flags)
+{
+	if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
+		if (!tb[CTA_PROTO_SRC_PORT])
+			return -EINVAL;
+
+		t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
+	}
+
+	if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
+		if (!tb[CTA_PROTO_DST_PORT])
+			return -EINVAL;
+
+		t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
+
+unsigned int nf_ct_port_nlattr_tuple_size(void)
+{
+	static unsigned int size __read_mostly;
+
+	if (!size)
+		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
+#endif
+
+/* Used by ipt_REJECT and ip6t_REJECT. */
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	/* This ICMP is in reverse direction to the packet which caused it */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
+		ctinfo = IP_CT_RELATED_REPLY;
+	else
+		ctinfo = IP_CT_RELATED;
+
+	/* Attach to new skbuff, and increment count */
+	nf_ct_set(nskb, ct, ctinfo);
+	nf_conntrack_get(skb_nfct(nskb));
+}
+
+static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
+				 struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conntrack_tuple tuple;
+	struct nf_nat_hook *nat_hook;
+	unsigned int status;
+	int dataoff;
+	u16 l3num;
+	u8 l4num;
+
+	l3num = nf_ct_l3num(ct);
+
+	dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
+	if (dataoff <= 0)
+		return -1;
+
+	if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
+			     l4num, net, &tuple))
+		return -1;
+
+	if (ct->status & IPS_SRC_NAT) {
+		memcpy(tuple.src.u3.all,
+		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
+		       sizeof(tuple.src.u3.all));
+		tuple.src.u.all =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
+	}
+
+	if (ct->status & IPS_DST_NAT) {
+		memcpy(tuple.dst.u3.all,
+		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
+		       sizeof(tuple.dst.u3.all));
+		tuple.dst.u.all =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
+	}
+
+	h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
+	if (!h)
+		return 0;
+
+	/* Store status bits of the conntrack that is clashing to re-do NAT
+	 * mangling according to what has already been done to this packet.
+	 */
+	status = ct->status;
+
+	nf_ct_put(ct);
+	ct = nf_ct_tuplehash_to_ctrack(h);
+	nf_ct_set(skb, ct, ctinfo);
+
+	nat_hook = rcu_dereference(nf_nat_hook);
+	if (!nat_hook)
+		return 0;
+
+	if (status & IPS_SRC_NAT &&
+	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
+				IP_CT_DIR_ORIGINAL) == NF_DROP)
+		return -1;
+
+	if (status & IPS_DST_NAT &&
+	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
+				IP_CT_DIR_ORIGINAL) == NF_DROP)
+		return -1;
+
+	return 0;
+}
+
+/* This packet is coming from userspace via nf_queue, complete the packet
+ * processing after the helper invocation in nf_confirm().
+ */
+static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo)
+{
+	const struct nf_conntrack_helper *helper;
+	const struct nf_conn_help *help;
+	int protoff;
+
+	help = nfct_help(ct);
+	if (!help)
+		return 0;
+
+	helper = rcu_dereference(help->helper);
+	if (!helper)
+		return 0;
+
+	if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+		return 0;
+
+	switch (nf_ct_l3num(ct)) {
+	case NFPROTO_IPV4:
+		protoff = skb_network_offset(skb) + ip_hdrlen(skb);
+		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	case NFPROTO_IPV6: {
+		__be16 frag_off;
+		u8 pnum;
+
+		pnum = ipv6_hdr(skb)->nexthdr;
+		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+					   &frag_off);
+		if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
+			return 0;
+		break;
+	}
+#endif
+	default:
+		return 0;
+	}
+
+	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+	    !nf_is_loopback_packet(skb)) {
+		if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+			NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+			return -1;
+		}
+	}
+
+	/* We've seen it coming out the other side: confirm it */
+	return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
+}
+
+static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	int err;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 0;
+
+	if (!nf_ct_is_confirmed(ct)) {
+		err = __nf_conntrack_update(net, skb, ct, ctinfo);
+		if (err < 0)
+			return err;
+
+		ct = nf_ct_get(skb, &ctinfo);
+	}
+
+	return nf_confirm_cthelper(skb, ct, ctinfo);
+}
+
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+				       const struct sk_buff *skb)
+{
+	const struct nf_conntrack_tuple *src_tuple;
+	const struct nf_conntrack_tuple_hash *hash;
+	struct nf_conntrack_tuple srctuple;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct) {
+		src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+		memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+		return true;
+	}
+
+	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+			       NFPROTO_IPV4, dev_net(skb->dev),
+			       &srctuple))
+		return false;
+
+	hash = nf_conntrack_find_get(dev_net(skb->dev),
+				     &nf_ct_zone_dflt,
+				     &srctuple);
+	if (!hash)
+		return false;
+
+	ct = nf_ct_tuplehash_to_ctrack(hash);
+	src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+	memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+	nf_ct_put(ct);
+
+	return true;
+}
+
+/* Bring out ya dead! */
+static struct nf_conn *
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
+		void *data, unsigned int *bucket)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct hlist_nulls_node *n;
+	spinlock_t *lockp;
+
+	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+		struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
+
+		if (hlist_nulls_empty(hslot))
+			continue;
+
+		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
+		local_bh_disable();
+		nf_conntrack_lock(lockp);
+		hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
+			if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+				continue;
+			/* All nf_conn objects are added to the hash table twice, once
+			 * for the original direction tuple, once for the reply tuple.
+			 *
+			 * Exception: In the IPS_NAT_CLASH case, only the reply
+			 * tuple is added (the original tuple already existed for
+			 * a different object).
+			 *
+			 * We only need to call the iterator once for each
+			 * conntrack, so we just use the 'reply' direction
+			 * tuple while iterating.
+			 */
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			if (iter(ct, data))
+				goto found;
+		}
+		spin_unlock(lockp);
+		local_bh_enable();
+		cond_resched();
+	}
+
+	return NULL;
+found:
+	atomic_inc(&ct->ct_general.use);
+	spin_unlock(lockp);
+	local_bh_enable();
+	return ct;
+}
+
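+/* get_next_corpse() returns each matching entry with a reference held, so
+ * nf_ct_delete() can run without the bucket lock; scanning resumes at the
+ * same bucket on the next call.
+ */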
+static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
+				  void *data, u32 portid, int report)
+{
+	unsigned int bucket = 0;
+	struct nf_conn *ct;
+
+	might_sleep();
+
+	mutex_lock(&nf_conntrack_mutex);
+	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+		/* Time to push up daisies... */
+
+		nf_ct_delete(ct, portid, report);
+		nf_ct_put(ct);
+		cond_resched();
+	}
+	mutex_unlock(&nf_conntrack_mutex);
+}
+
+struct iter_data {
+	int (*iter)(struct nf_conn *i, void *data);
+	void *data;
+	struct net *net;
+};
+
+static int iter_net_only(struct nf_conn *i, void *data)
+{
+	struct iter_data *d = data;
+
+	if (!net_eq(d->net, nf_ct_net(i)))
+		return 0;
+
+	return d->iter(i, d->data);
+}
+
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_node *n;
+		struct ct_pcpu *pcpu;
+
+		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_bh(&pcpu->lock);
+		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+			struct nf_conn *ct;
+
+			ct = nf_ct_tuplehash_to_ctrack(h);
+
+			/* we cannot call iter() on unconfirmed list, the
+			 * owning cpu can reallocate ct->ext at any time.
+			 */
+			set_bit(IPS_DYING_BIT, &ct->status);
+		}
+		spin_unlock_bh(&pcpu->lock);
+		cond_resched();
+	}
+}
+
+void nf_ct_unconfirmed_destroy(struct net *net)
+{
+	might_sleep();
+
+	if (atomic_read(&net->ct.count) > 0) {
+		__nf_ct_unconfirmed_destroy(net);
+		nf_queue_nf_hook_drop(net);
+		synchronize_net();
+	}
+}
+EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
+
+void nf_ct_iterate_cleanup_net(struct net *net,
+			       int (*iter)(struct nf_conn *i, void *data),
+			       void *data, u32 portid, int report)
+{
+	struct iter_data d;
+
+	might_sleep();
+
+	if (atomic_read(&net->ct.count) == 0)
+		return;
+
+	d.iter = iter;
+	d.data = data;
+	d.net = net;
+
+	nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
+
+/**
+ * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
+ * @iter: callback to invoke for each conntrack
+ * @data: data to pass to @iter
+ *
+ * Like nf_ct_iterate_cleanup, but first marks conntracks on the
+ * unconfirmed list as dying (so they will not be inserted into
+ * main table).
+ *
+ * Can only be called in module exit path.
+ */
+void
+nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
+{
+	struct net *net;
+
+	down_read(&net_rwsem);
+	for_each_net(net) {
+		if (atomic_read(&net->ct.count) == 0)
+			continue;
+		__nf_ct_unconfirmed_destroy(net);
+		nf_queue_nf_hook_drop(net);
+	}
+	up_read(&net_rwsem);
+
+	/* Need to wait for the netns cleanup worker to finish, if it's
+	 * running -- it might have deleted a net namespace from
+	 * the global list, so our __nf_ct_unconfirmed_destroy() might
+	 * not have affected all namespaces.
+	 */
+	net_ns_barrier();
+
+	/* a conntrack could have been unlinked from unconfirmed list
+	 * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
+	 * This makes sure it is inserted into the conntrack table.
+	 */
+	synchronize_net();
+
+	nf_ct_iterate_cleanup(iter, data, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
+
+static int kill_all(struct nf_conn *i, void *data)
+{
+	return net_eq(nf_ct_net(i), data);
+}
+
+void nf_ct_free_hashtable(void *hash, unsigned int size)
+{
+	if (is_vmalloc_addr(hash))
+		vfree(hash);
+	else
+		free_pages((unsigned long)hash,
+			   get_order(sizeof(struct hlist_head) * size));
+}
+EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
+
+void nf_conntrack_cleanup_start(void)
+{
+	conntrack_gc_work.exiting = true;
+	RCU_INIT_POINTER(ip_ct_attach, NULL);
+}
+
+void nf_conntrack_cleanup_end(void)
+{
+	RCU_INIT_POINTER(nf_ct_hook, NULL);
+	cancel_delayed_work_sync(&conntrack_gc_work.dwork);
+	kvfree(nf_conntrack_hash);
+
+	nf_conntrack_proto_fini();
+	nf_conntrack_seqadj_fini();
+	nf_conntrack_labels_fini();
+	nf_conntrack_helper_fini();
+	nf_conntrack_timeout_fini();
+	nf_conntrack_ecache_fini();
+	nf_conntrack_tstamp_fini();
+	nf_conntrack_acct_fini();
+	nf_conntrack_expect_fini();
+
+	kmem_cache_destroy(nf_conntrack_cachep);
+}
+
+/*
+ * Mishearing the voices in his head, our hero wonders how he's
+ * supposed to kill the mall.
+ */
+void nf_conntrack_cleanup_net(struct net *net)
+{
+	LIST_HEAD(single);
+
+	list_add(&net->exit_list, &single);
+	nf_conntrack_cleanup_net_list(&single);
+}
+
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
+{
+	int busy;
+	struct net *net;
+
+	/*
+	 * This makes sure all current packets have passed through
+	 *  the netfilter framework.  Roll on, two-stage module
+	 *  delete...
+	 */
+	synchronize_net();
+i_see_dead_people:
+	busy = 0;
+	list_for_each_entry(net, net_exit_list, exit_list) {
+		nf_ct_iterate_cleanup(kill_all, net, 0, 0);
+		if (atomic_read(&net->ct.count) != 0)
+			busy = 1;
+	}
+	if (busy) {
+		schedule();
+		goto i_see_dead_people;
+	}
+
+	list_for_each_entry(net, net_exit_list, exit_list) {
+		nf_conntrack_proto_pernet_fini(net);
+		nf_conntrack_ecache_pernet_fini(net);
+		nf_conntrack_expect_pernet_fini(net);
+		free_percpu(net->ct.stat);
+		free_percpu(net->ct.pcpu_lists);
+	}
+}
+
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
+{
+	struct hlist_nulls_head *hash;
+	unsigned int nr_slots, i;
+
+	if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+		return NULL;
+
+	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+	hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
+
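+	/* Seed each chain's nulls marker with its bucket index: an RCU lookup
+	 * that was moved to another chain (entries are SLAB_TYPESAFE_BY_RCU
+	 * and may be recycled) terminates on a nulls value that does not
+	 * match its starting bucket, and knows to restart.
+	 */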
+	if (hash && nulls)
+		for (i = 0; i < nr_slots; i++)
+			INIT_HLIST_NULLS_HEAD(&hash[i], i);
+
+	return hash;
+}
+EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
+
+int nf_conntrack_hash_resize(unsigned int hashsize)
+{
+	int i, bucket;
+	unsigned int old_size;
+	struct hlist_nulls_head *hash, *old_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+
+	if (!hashsize)
+		return -EINVAL;
+
+	hash = nf_ct_alloc_hashtable(&hashsize, 1);
+	if (!hash)
+		return -ENOMEM;
+
+	mutex_lock(&nf_conntrack_mutex);
+	old_size = nf_conntrack_htable_size;
+	if (old_size == hashsize) {
+		mutex_unlock(&nf_conntrack_mutex);
+		kvfree(hash);
+		return 0;
+	}
+
+	local_bh_disable();
+	nf_conntrack_all_lock();
+	write_seqcount_begin(&nf_conntrack_generation);
+
+	/* Lookups in the old hash might happen in parallel, which means we
+	 * might get false negatives during connection lookup. New connections
+	 * created because of a false negative won't make it into the hash
+	 * though, since that would require taking the locks.
+	 */
+
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
+		while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+			h = hlist_nulls_entry(nf_conntrack_hash[i].first,
+					      struct nf_conntrack_tuple_hash, hnnode);
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			hlist_nulls_del_rcu(&h->hnnode);
+			bucket = __hash_conntrack(nf_ct_net(ct),
+						  &h->tuple, hashsize);
+			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
+		}
+	}
+	old_size = nf_conntrack_htable_size;
+	old_hash = nf_conntrack_hash;
+
+	nf_conntrack_hash = hash;
+	nf_conntrack_htable_size = hashsize;
+
+	write_seqcount_end(&nf_conntrack_generation);
+	nf_conntrack_all_unlock();
+	local_bh_enable();
+
+	mutex_unlock(&nf_conntrack_mutex);
+
+	synchronize_net();
+	kvfree(old_hash);
+	return 0;
+}
+
+int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
+{
+	unsigned int hashsize;
+	int rc;
+
+	if (current->nsproxy->net_ns != &init_net)
+		return -EOPNOTSUPP;
+
+	/* On boot, we can set this without any fancy locking. */
+	if (!nf_conntrack_hash)
+		return param_set_uint(val, kp);
+
+	rc = kstrtouint(val, 0, &hashsize);
+	if (rc)
+		return rc;
+
+	return nf_conntrack_hash_resize(hashsize);
+}
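+
+/* The hashsize module parameter is writable at runtime, e.g.
+ *   echo 131072 > /sys/module/nf_conntrack/parameters/hashsize
+ * ends up here via nf_conntrack_set_hashsize().
+ */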
+
+static __always_inline unsigned int total_extension_size(void)
+{
+	/* remember to add new extensions below */
+	BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
+
+	return sizeof(struct nf_ct_ext) +
+	       sizeof(struct nf_conn_help)
+#if IS_ENABLED(CONFIG_NF_NAT)
+		+ sizeof(struct nf_conn_nat)
+#endif
+		+ sizeof(struct nf_conn_seqadj)
+		+ sizeof(struct nf_conn_acct)
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+		+ sizeof(struct nf_conntrack_ecache)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+		+ sizeof(struct nf_conn_tstamp)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+		+ sizeof(struct nf_conn_timeout)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+		+ sizeof(struct nf_conn_labels)
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+		+ sizeof(struct nf_conn_synproxy)
+#endif
+	;
+}
+
+int nf_conntrack_init_start(void)
+{
+	unsigned long nr_pages = totalram_pages();
+	int max_factor = 8;
+	int ret = -ENOMEM;
+	int i;
+
+	/* struct nf_ct_ext uses u8 to store offsets/size */
+	BUILD_BUG_ON(total_extension_size() > 255u);
+
+	seqcount_spinlock_init(&nf_conntrack_generation,
+			       &nf_conntrack_locks_all_lock);
+
+	for (i = 0; i < CONNTRACK_LOCKS; i++)
+		spin_lock_init(&nf_conntrack_locks[i]);
+
+	if (!nf_conntrack_htable_size) {
+		/* Idea from tcp.c: use 1/16384 of memory.
+		 * On i386: 32MB machine has 512 buckets.
+		 * >= 1GB machines have 16384 buckets.
+		 * >= 4GB machines have 65536 buckets.
+		 */
+		nf_conntrack_htable_size
+			= (((nr_pages << PAGE_SHIFT) / 16384)
+			   / sizeof(struct hlist_head));
+		if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
+			nf_conntrack_htable_size = 65536;
+		else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
+			nf_conntrack_htable_size = 16384;
+		if (nf_conntrack_htable_size < 32)
+			nf_conntrack_htable_size = 32;
+
+		/* Use a max. factor of four by default to get the same max as
+		 * with the old struct list_heads. When a table size is given
+		 * we use the old value of 8 to avoid reducing the max.
+		 * entries. */
+		max_factor = 4;
+	}
+
+	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
+	if (!nf_conntrack_hash)
+		return -ENOMEM;
+
+	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
+
+	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+						sizeof(struct nf_conn),
+						NFCT_INFOMASK + 1,
+						SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
+	if (!nf_conntrack_cachep)
+		goto err_cachep;
+
+	ret = nf_conntrack_expect_init();
+	if (ret < 0)
+		goto err_expect;
+
+	ret = nf_conntrack_acct_init();
+	if (ret < 0)
+		goto err_acct;
+
+	ret = nf_conntrack_tstamp_init();
+	if (ret < 0)
+		goto err_tstamp;
+
+	ret = nf_conntrack_ecache_init();
+	if (ret < 0)
+		goto err_ecache;
+
+	ret = nf_conntrack_timeout_init();
+	if (ret < 0)
+		goto err_timeout;
+
+	ret = nf_conntrack_helper_init();
+	if (ret < 0)
+		goto err_helper;
+
+	ret = nf_conntrack_labels_init();
+	if (ret < 0)
+		goto err_labels;
+
+	ret = nf_conntrack_seqadj_init();
+	if (ret < 0)
+		goto err_seqadj;
+
+	ret = nf_conntrack_proto_init();
+	if (ret < 0)
+		goto err_proto;
+
+	conntrack_gc_work_init(&conntrack_gc_work);
+	queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
+
+	return 0;
+
+err_proto:
+	nf_conntrack_seqadj_fini();
+err_seqadj:
+	nf_conntrack_labels_fini();
+err_labels:
+	nf_conntrack_helper_fini();
+err_helper:
+	nf_conntrack_timeout_fini();
+err_timeout:
+	nf_conntrack_ecache_fini();
+err_ecache:
+	nf_conntrack_tstamp_fini();
+err_tstamp:
+	nf_conntrack_acct_fini();
+err_acct:
+	nf_conntrack_expect_fini();
+err_expect:
+	kmem_cache_destroy(nf_conntrack_cachep);
+err_cachep:
+	kvfree(nf_conntrack_hash);
+	return ret;
+}
+
+static struct nf_ct_hook nf_conntrack_hook = {
+	.update		= nf_conntrack_update,
+	.destroy	= destroy_conntrack,
+	.get_tuple_skb  = nf_conntrack_get_tuple_skb,
+};
+
+void nf_conntrack_init_end(void)
+{
+	/* For use by REJECT target */
+	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
+	RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
+}
+
+/*
+ * We need to use special "null" values that are never used in the hash table
+ */
+#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
+#define DYING_NULLS_VAL		((1<<30)+1)
+
+int nf_conntrack_init_net(struct net *net)
+{
+	int ret = -ENOMEM;
+	int cpu;
+
+	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
+	BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
+	atomic_set(&net->ct.count, 0);
+
+	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
+	if (!net->ct.pcpu_lists)
+		goto err_stat;
+
+	for_each_possible_cpu(cpu) {
+		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_init(&pcpu->lock);
+		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
+		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
+	}
+
+	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
+	if (!net->ct.stat)
+		goto err_pcpu_lists;
+
+	ret = nf_conntrack_expect_pernet_init(net);
+	if (ret < 0)
+		goto err_expect;
+
+	nf_conntrack_acct_pernet_init(net);
+	nf_conntrack_tstamp_pernet_init(net);
+	nf_conntrack_ecache_pernet_init(net);
+	nf_conntrack_helper_pernet_init(net);
+	nf_conntrack_proto_pernet_init(net);
+
+	return 0;
+
+err_expect:
+	free_percpu(net->ct.stat);
+err_pcpu_lists:
+	free_percpu(net->ct.pcpu_lists);
+err_stat:
+	return ret;
+}
diff --git a/upstream/linux-5.10/net/netfilter/xt_DSCP.c b/upstream/linux-5.10/net/netfilter/xt_DSCP.c
new file mode 100755
index 0000000..eababc3
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_DSCP.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* x_tables module for setting the IPv4/IPv6 DSCP field, Version 1.8
+ *
+ * (C) 2002 by Harald Welte <laforge@netfilter.org>
+ * based on ipt_FTOS.c (C) 2000 by Matthew G. Marsh <mgm@paktronix.com>
+ *
+ * See RFC2474 for a description of the DSCP field within the IP Header.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/dsfield.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_DSCP.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_DSCP");
+MODULE_ALIAS("ip6t_DSCP");
+MODULE_ALIAS("ipt_TOS");
+MODULE_ALIAS("ip6t_TOS");
+
+static unsigned int
+dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_DSCP_info *dinfo = par->targinfo;
+	u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
+
+	if (dscp != dinfo->dscp) {
+		if (skb_ensure_writable(skb, sizeof(struct iphdr)))
+			return NF_DROP;
+
+		ipv4_change_dsfield(ip_hdr(skb),
+				    (__force __u8)(~XT_DSCP_MASK),
+				    dinfo->dscp << XT_DSCP_SHIFT);
+	}
+	return XT_CONTINUE;
+}
+
+static unsigned int
+dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_DSCP_info *dinfo = par->targinfo;
+	u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
+
+	if (dscp != dinfo->dscp) {
+		if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
+			return NF_DROP;
+
+		ipv6_change_dsfield(ipv6_hdr(skb),
+				    (__force __u8)(~XT_DSCP_MASK),
+				    dinfo->dscp << XT_DSCP_SHIFT);
+	}
+	return XT_CONTINUE;
+}
+
+static int dscp_tg_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_DSCP_info *info = par->targinfo;
+
+	if (info->dscp > XT_DSCP_MAX)
+		return -EDOM;
+	return 0;
+}
+
+static unsigned int
+tos_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_tos_target_info *info = par->targinfo;
+	struct iphdr *iph = ip_hdr(skb);
+	u_int8_t orig, nv;
+
+	orig = ipv4_get_dsfield(iph);
+	nv   = (orig & ~info->tos_mask) ^ info->tos_value;
+
+	if (orig != nv) {
+		if (skb_ensure_writable(skb, sizeof(struct iphdr)))
+			return NF_DROP;
+		iph = ip_hdr(skb);
+		ipv4_change_dsfield(iph, 0, nv);
+	}
+
+	return XT_CONTINUE;
+}
+
+static unsigned int
+tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_tos_target_info *info = par->targinfo;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	u_int8_t orig, nv;
+
+	orig = ipv6_get_dsfield(iph);
+	nv   = (orig & ~info->tos_mask) ^ info->tos_value;
+
+	if (orig != nv) {
+		if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
+			return NF_DROP;
+		iph = ipv6_hdr(skb);
+		ipv6_change_dsfield(iph, 0, nv);
+	}
+
+	return XT_CONTINUE;
+}
+
+static struct xt_target dscp_tg_reg[] __read_mostly = {
+	{
+		.name		= "DSCP",
+		.family		= NFPROTO_IPV4,
+		.checkentry	= dscp_tg_check,
+		.target		= dscp_tg,
+		.targetsize	= sizeof(struct xt_DSCP_info),
+		.table		= "mangle",
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "DSCP",
+		.family		= NFPROTO_IPV6,
+		.checkentry	= dscp_tg_check,
+		.target		= dscp_tg6,
+		.targetsize	= sizeof(struct xt_DSCP_info),
+		.table		= "mangle",
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "TOS",
+		.revision	= 1,
+		.family		= NFPROTO_IPV4,
+		.table		= "mangle",
+		.target		= tos_tg,
+		.targetsize	= sizeof(struct xt_tos_target_info),
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "TOS",
+		.revision	= 1,
+		.family		= NFPROTO_IPV6,
+		.table		= "mangle",
+		.target		= tos_tg6,
+		.targetsize	= sizeof(struct xt_tos_target_info),
+		.me		= THIS_MODULE,
+	},
+};
+
+static int __init dscp_tg_init(void)
+{
+	return xt_register_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
+}
+
+static void __exit dscp_tg_exit(void)
+{
+	xt_unregister_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
+}
+
+module_init(dscp_tg_init);
+module_exit(dscp_tg_exit);
diff --git a/upstream/linux-5.10/net/netfilter/xt_RATEEST.c b/upstream/linux-5.10/net/netfilter/xt_RATEEST.c
new file mode 100755
index 0000000..0d5c422
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_RATEEST.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2007 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/gen_stats.h>
+#include <linux/jhash.h>
+#include <linux/rtnetlink.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <net/gen_stats.h>
+#include <net/netlink.h>
+#include <net/netns/generic.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_RATEEST.h>
+#include <net/netfilter/xt_rateest.h>
+
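+/* Illustrative userspace usage (iptables syntax): attach an estimator named
+ * "uplink", sampling every 250ms with an EWMA log of 2:
+ *   iptables -t mangle -A POSTROUTING -o eth0 -j RATEEST \
+ *            --rateest-name uplink --rateest-interval 250ms --rateest-ewmalog 2
+ * The measured rate can then be matched with the rateest match (-m rateest).
+ */
+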
+#define RATEEST_HSIZE	16
+
+struct xt_rateest_net {
+	struct mutex hash_lock;
+	struct hlist_head hash[RATEEST_HSIZE];
+};
+
+static unsigned int xt_rateest_id;
+
+static unsigned int jhash_rnd __read_mostly;
+
+static unsigned int xt_rateest_hash(const char *name)
+{
+	return jhash(name, sizeof_field(struct xt_rateest, name), jhash_rnd) &
+	       (RATEEST_HSIZE - 1);
+}
+
+static void xt_rateest_hash_insert(struct xt_rateest_net *xn,
+				   struct xt_rateest *est)
+{
+	unsigned int h;
+
+	h = xt_rateest_hash(est->name);
+	hlist_add_head(&est->list, &xn->hash[h]);
+}
+
+static struct xt_rateest *__xt_rateest_lookup(struct xt_rateest_net *xn,
+					      const char *name)
+{
+	struct xt_rateest *est;
+	unsigned int h;
+
+	h = xt_rateest_hash(name);
+	hlist_for_each_entry(est, &xn->hash[h], list) {
+		if (strcmp(est->name, name) == 0) {
+			est->refcnt++;
+			return est;
+		}
+	}
+
+	return NULL;
+}
+
+struct xt_rateest *xt_rateest_lookup(struct net *net, const char *name)
+{
+	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+	struct xt_rateest *est;
+
+	mutex_lock(&xn->hash_lock);
+	est = __xt_rateest_lookup(xn, name);
+	mutex_unlock(&xn->hash_lock);
+	return est;
+}
+EXPORT_SYMBOL_GPL(xt_rateest_lookup);
+
+void xt_rateest_put(struct net *net, struct xt_rateest *est)
+{
+	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+
+	mutex_lock(&xn->hash_lock);
+	if (--est->refcnt == 0) {
+		hlist_del(&est->list);
+		gen_kill_estimator(&est->rate_est);
+		/*
+		 * gen_estimator est_timer() might access est->lock or bstats,
+		 * so wait for an RCU grace period before freeing 'est'.
+		 */
+		kfree_rcu(est, rcu);
+	}
+	mutex_unlock(&xn->hash_lock);
+}
+EXPORT_SYMBOL_GPL(xt_rateest_put);
+
+static unsigned int
+xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_rateest_target_info *info = par->targinfo;
+	struct gnet_stats_basic_packed *stats = &info->est->bstats;
+
+	spin_lock_bh(&info->est->lock);
+	stats->bytes += skb->len;
+	stats->packets++;
+	spin_unlock_bh(&info->est->lock);
+
+	return XT_CONTINUE;
+}
+
+static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
+{
+	struct xt_rateest_net *xn = net_generic(par->net, xt_rateest_id);
+	struct xt_rateest_target_info *info = par->targinfo;
+	struct xt_rateest *est;
+	struct {
+		struct nlattr		opt;
+		struct gnet_estimator	est;
+	} cfg;
+	int ret;
+
+	if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name))
+		return -ENAMETOOLONG;
+
+	net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
+
+	mutex_lock(&xn->hash_lock);
+	est = __xt_rateest_lookup(xn, info->name);
+	if (est) {
+		mutex_unlock(&xn->hash_lock);
+		/*
+		 * If estimator parameters are specified, they must match the
+		 * existing estimator.
+		 */
+		if ((!info->interval && !info->ewma_log) ||
+		    (info->interval != est->params.interval ||
+		     info->ewma_log != est->params.ewma_log)) {
+			xt_rateest_put(par->net, est);
+			return -EINVAL;
+		}
+		info->est = est;
+		return 0;
+	}
+
+	ret = -ENOMEM;
+	est = kzalloc(sizeof(*est), GFP_KERNEL);
+	if (!est)
+		goto err1;
+
+	strlcpy(est->name, info->name, sizeof(est->name));
+	spin_lock_init(&est->lock);
+	est->refcnt		= 1;
+	est->params.interval	= info->interval;
+	est->params.ewma_log	= info->ewma_log;
+
+	cfg.opt.nla_len		= nla_attr_size(sizeof(cfg.est));
+	cfg.opt.nla_type	= TCA_STATS_RATE_EST;
+	cfg.est.interval	= info->interval;
+	cfg.est.ewma_log	= info->ewma_log;
+
+	ret = gen_new_estimator(&est->bstats, NULL, &est->rate_est,
+				&est->lock, NULL, &cfg.opt);
+	if (ret < 0)
+		goto err2;
+
+	info->est = est;
+	xt_rateest_hash_insert(xn, est);
+	mutex_unlock(&xn->hash_lock);
+	return 0;
+
+err2:
+	kfree(est);
+err1:
+	mutex_unlock(&xn->hash_lock);
+	return ret;
+}
+
+static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par)
+{
+	struct xt_rateest_target_info *info = par->targinfo;
+
+	xt_rateest_put(par->net, info->est);
+}
+
+static struct xt_target xt_rateest_tg_reg __read_mostly = {
+	.name       = "RATEEST",
+	.revision   = 0,
+	.family     = NFPROTO_UNSPEC,
+	.target     = xt_rateest_tg,
+	.checkentry = xt_rateest_tg_checkentry,
+	.destroy    = xt_rateest_tg_destroy,
+	.targetsize = sizeof(struct xt_rateest_target_info),
+	.usersize   = offsetof(struct xt_rateest_target_info, est),
+	.me         = THIS_MODULE,
+};
+
+static __net_init int xt_rateest_net_init(struct net *net)
+{
+	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+	int i;
+
+	mutex_init(&xn->hash_lock);
+	for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
+		INIT_HLIST_HEAD(&xn->hash[i]);
+	return 0;
+}
+
+static struct pernet_operations xt_rateest_net_ops = {
+	.init = xt_rateest_net_init,
+	.id   = &xt_rateest_id,
+	.size = sizeof(struct xt_rateest_net),
+};
+
+static int __init xt_rateest_tg_init(void)
+{
+	int err = register_pernet_subsys(&xt_rateest_net_ops);
+
+	if (err)
+		return err;
+	return xt_register_target(&xt_rateest_tg_reg);
+}
+
+static void __exit xt_rateest_tg_fini(void)
+{
+	xt_unregister_target(&xt_rateest_tg_reg);
+	unregister_pernet_subsys(&xt_rateest_net_ops);
+}
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: packet rate estimator");
+MODULE_ALIAS("ipt_RATEEST");
+MODULE_ALIAS("ip6t_RATEEST");
+module_init(xt_rateest_tg_init);
+module_exit(xt_rateest_tg_fini);
diff --git a/upstream/linux-5.10/net/netfilter/xt_TCPMSS.c b/upstream/linux-5.10/net/netfilter/xt_TCPMSS.c
new file mode 100755
index 0000000..122db9f
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_TCPMSS.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is a module which is used for setting the MSS option in TCP packets.
+ *
+ * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
+ * Copyright (C) 2007 Patrick McHardy <kaber@trash.net>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/gfp.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <net/dst.h>
+#include <net/flow.h>
+#include <net/ipv6.h>
+#include <net/route.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter/xt_TCPMSS.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
+MODULE_ALIAS("ipt_TCPMSS");
+MODULE_ALIAS("ip6t_TCPMSS");
+
+static inline unsigned int
+optlen(const u_int8_t *opt, unsigned int offset)
+{
+	/* Beware zero-length options: make finite progress */
+	if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
+		return 1;
+	else
+		return opt[offset+1];
+}
+
+static u_int32_t tcpmss_reverse_mtu(struct net *net,
+				    const struct sk_buff *skb,
+				    unsigned int family)
+{
+	struct flowi fl;
+	struct rtable *rt = NULL;
+	u_int32_t mtu     = ~0U;
+
+	if (family == PF_INET) {
+		struct flowi4 *fl4 = &fl.u.ip4;
+		memset(fl4, 0, sizeof(*fl4));
+		fl4->daddr = ip_hdr(skb)->saddr;
+	} else {
+		struct flowi6 *fl6 = &fl.u.ip6;
+
+		memset(fl6, 0, sizeof(*fl6));
+		fl6->daddr = ipv6_hdr(skb)->saddr;
+	}
+
+	nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
+	if (rt != NULL) {
+		mtu = dst_mtu(&rt->dst);
+		dst_release(&rt->dst);
+	}
+	return mtu;
+}
+
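+/* Returns 0 if the packet was left alone or an existing MSS option was
+ * rewritten in place, TCPOLEN_MSS if a new MSS option was inserted (the
+ * caller must then grow the IP length fields), or -1 on error (the
+ * caller drops the packet).
+ */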
+static int
+tcpmss_mangle_packet(struct sk_buff *skb,
+		     const struct xt_action_param *par,
+		     unsigned int family,
+		     unsigned int tcphoff,
+		     unsigned int minlen)
+{
+	const struct xt_tcpmss_info *info = par->targinfo;
+	struct tcphdr *tcph;
+	int len, tcp_hdrlen;
+	unsigned int i;
+	__be16 oldval;
+	u16 newmss;
+	u8 *opt;
+
+	/* This is a fragment, no TCP header is available */
+	if (par->fragoff != 0)
+		return 0;
+
+	if (skb_ensure_writable(skb, skb->len))
+		return -1;
+
+	len = skb->len - tcphoff;
+	if (len < (int)sizeof(struct tcphdr))
+		return -1;
+
+	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	tcp_hdrlen = tcph->doff * 4;
+
+	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
+		return -1;
+
+	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+		struct net *net = xt_net(par);
+		unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
+		unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);
+
+		if (min_mtu <= minlen) {
+			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+					    min_mtu);
+			return -1;
+		}
+		newmss = min_mtu - minlen;
+	} else
+		newmss = info->mss;
+
+	opt = (u_int8_t *)tcph;
+	for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+		if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
+			u_int16_t oldmss;
+
+			oldmss = (opt[i+2] << 8) | opt[i+3];
+
+			/* Never increase MSS, even when setting it, as
+			 * doing so results in problems for hosts that rely
+			 * on MSS being set correctly.
+			 */
+			if (oldmss <= newmss)
+				return 0;
+
+			opt[i+2] = (newmss & 0xff00) >> 8;
+			opt[i+3] = newmss & 0x00ff;
+
+			inet_proto_csum_replace2(&tcph->check, skb,
+						 htons(oldmss), htons(newmss),
+						 false);
+			return 0;
+		}
+	}
+
+	/* There is data after the header so the option can't be added
+	 * without moving it, and doing so may make the SYN packet
+	 * itself too large. Accept the packet unmodified instead.
+	 */
+	if (len > tcp_hdrlen)
+		return 0;
+
+	/* tcph->doff has 4 bits, do not wrap it to 0 */
+	if (tcp_hdrlen >= 15 * 4)
+		return 0;
+
+	/*
+	 * MSS option not found: add it.
+	 */
+	if (skb_tailroom(skb) < TCPOLEN_MSS) {
+		if (pskb_expand_head(skb, 0,
+				     TCPOLEN_MSS - skb_tailroom(skb),
+				     GFP_ATOMIC))
+			return -1;
+		tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	}
+
+	skb_put(skb, TCPOLEN_MSS);
+
+	/*
+	 * IPv4: RFC 1122 states "If an MSS option is not received at
+	 * connection setup, TCP MUST assume a default send MSS of 536".
+	 * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
+	 * total header length of 60 (40-byte IPv6 header plus 20-byte TCP
+	 * header), ergo the default MSS value is 1220.
+	 * Since no MSS was provided, we must use the default values.
+	 */
+	if (xt_family(par) == NFPROTO_IPV4)
+		newmss = min(newmss, (u16)536);
+	else
+		newmss = min(newmss, (u16)1220);
+
+	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
+	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
+
+	inet_proto_csum_replace2(&tcph->check, skb,
+				 htons(len), htons(len + TCPOLEN_MSS), true);
+	opt[0] = TCPOPT_MSS;
+	opt[1] = TCPOLEN_MSS;
+	opt[2] = (newmss & 0xff00) >> 8;
+	opt[3] = newmss & 0x00ff;
+
+	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
+
+	oldval = ((__be16 *)tcph)[6];
+	tcph->doff += TCPOLEN_MSS/4;
+	inet_proto_csum_replace2(&tcph->check, skb,
+				 oldval, ((__be16 *)tcph)[6], false);
+	return TCPOLEN_MSS;
+}
+
+static unsigned int
+tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	__be16 newlen;
+	int ret;
+
+	ret = tcpmss_mangle_packet(skb, par,
+				   PF_INET,
+				   iph->ihl * 4,
+				   sizeof(*iph) + sizeof(struct tcphdr));
+	if (ret < 0)
+		return NF_DROP;
+	if (ret > 0) {
+		iph = ip_hdr(skb);
+		newlen = htons(ntohs(iph->tot_len) + ret);
+		csum_replace2(&iph->check, iph->tot_len, newlen);
+		iph->tot_len = newlen;
+	}
+	return XT_CONTINUE;
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static unsigned int
+tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	u8 nexthdr;
+	__be16 frag_off, oldlen, newlen;
+	int tcphoff;
+	int ret;
+
+	nexthdr = ipv6h->nexthdr;
+	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
+	if (tcphoff < 0)
+		return NF_DROP;
+	ret = tcpmss_mangle_packet(skb, par,
+				   PF_INET6,
+				   tcphoff,
+				   sizeof(*ipv6h) + sizeof(struct tcphdr));
+	if (ret < 0)
+		return NF_DROP;
+	if (ret > 0) {
+		ipv6h = ipv6_hdr(skb);
+		oldlen = ipv6h->payload_len;
+		newlen = htons(ntohs(oldlen) + ret);
+		if (skb->ip_summed == CHECKSUM_COMPLETE)
+			skb->csum = csum_add(csum_sub(skb->csum, oldlen),
+					     newlen);
+		ipv6h->payload_len = newlen;
+	}
+	return XT_CONTINUE;
+}
+#endif
+
+/* Must specify -p tcp --syn */
+static inline bool find_syn_match(const struct xt_entry_match *m)
+{
+	const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
+
+	if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
+	    tcpinfo->flg_cmp & TCPHDR_SYN &&
+	    !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
+		return true;
+
+	return false;
+}
+
+static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_tcpmss_info *info = par->targinfo;
+	const struct ipt_entry *e = par->entryinfo;
+	const struct xt_entry_match *ematch;
+
+	if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
+	    (par->hook_mask & ~((1 << NF_INET_FORWARD) |
+			   (1 << NF_INET_LOCAL_OUT) |
+			   (1 << NF_INET_POST_ROUTING))) != 0) {
+		pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
+		return -EINVAL;
+	}
+	if (par->nft_compat)
+		return 0;
+
+	xt_ematch_foreach(ematch, e)
+		if (find_syn_match(ematch))
+			return 0;
+	pr_info_ratelimited("Only works on TCP SYN packets\n");
+	return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_tcpmss_info *info = par->targinfo;
+	const struct ip6t_entry *e = par->entryinfo;
+	const struct xt_entry_match *ematch;
+
+	if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
+	    (par->hook_mask & ~((1 << NF_INET_FORWARD) |
+			   (1 << NF_INET_LOCAL_OUT) |
+			   (1 << NF_INET_POST_ROUTING))) != 0) {
+		pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
+		return -EINVAL;
+	}
+	if (par->nft_compat)
+		return 0;
+
+	xt_ematch_foreach(ematch, e)
+		if (find_syn_match(ematch))
+			return 0;
+	pr_info_ratelimited("Only works on TCP SYN packets\n");
+	return -EINVAL;
+}
+#endif
+
+static struct xt_target tcpmss_tg_reg[] __read_mostly = {
+	{
+		.family		= NFPROTO_IPV4,
+		.name		= "TCPMSS",
+		.checkentry	= tcpmss_tg4_check,
+		.target		= tcpmss_tg4,
+		.targetsize	= sizeof(struct xt_tcpmss_info),
+		.proto		= IPPROTO_TCP,
+		.me		= THIS_MODULE,
+	},
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+	{
+		.family		= NFPROTO_IPV6,
+		.name		= "TCPMSS",
+		.checkentry	= tcpmss_tg6_check,
+		.target		= tcpmss_tg6,
+		.targetsize	= sizeof(struct xt_tcpmss_info),
+		.proto		= IPPROTO_TCP,
+		.me		= THIS_MODULE,
+	},
+#endif
+};
+
+static int __init tcpmss_tg_init(void)
+{
+	return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
+}
+
+static void __exit tcpmss_tg_exit(void)
+{
+	xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
+}
+
+module_init(tcpmss_tg_init);
+module_exit(tcpmss_tg_exit);
diff --git a/upstream/linux-5.10/net/netfilter/xt_hl.c b/upstream/linux-5.10/net/netfilter/xt_hl.c
new file mode 100755
index 0000000..c1a70f8
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_hl.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IP tables module for matching the value of the TTL
+ * (C) 2000,2001 by Harald Welte <laforge@netfilter.org>
+ *
+ * Hop Limit matching module
+ * (C) 2001-2002 Maciej Soltysiak <solt@dns.toxicfilms.tv>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ipt_ttl.h>
+#include <linux/netfilter_ipv6/ip6t_hl.h>
+
+MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
+MODULE_DESCRIPTION("Xtables: Hoplimit/TTL field match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ttl");
+MODULE_ALIAS("ip6t_hl");
+
+static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct ipt_ttl_info *info = par->matchinfo;
+	const u8 ttl = ip_hdr(skb)->ttl;
+
+	switch (info->mode) {
+	case IPT_TTL_EQ:
+		return ttl == info->ttl;
+	case IPT_TTL_NE:
+		return ttl != info->ttl;
+	case IPT_TTL_LT:
+		return ttl < info->ttl;
+	case IPT_TTL_GT:
+		return ttl > info->ttl;
+	}
+
+	return false;
+}
+
+static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct ip6t_hl_info *info = par->matchinfo;
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+	switch (info->mode) {
+	case IP6T_HL_EQ:
+		return ip6h->hop_limit == info->hop_limit;
+	case IP6T_HL_NE:
+		return ip6h->hop_limit != info->hop_limit;
+	case IP6T_HL_LT:
+		return ip6h->hop_limit < info->hop_limit;
+	case IP6T_HL_GT:
+		return ip6h->hop_limit > info->hop_limit;
+	}
+
+	return false;
+}
+
+static struct xt_match hl_mt_reg[] __read_mostly = {
+	{
+		.name       = "ttl",
+		.revision   = 0,
+		.family     = NFPROTO_IPV4,
+		.match      = ttl_mt,
+		.matchsize  = sizeof(struct ipt_ttl_info),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "hl",
+		.revision   = 0,
+		.family     = NFPROTO_IPV6,
+		.match      = hl_mt6,
+		.matchsize  = sizeof(struct ip6t_hl_info),
+		.me         = THIS_MODULE,
+	},
+};
+
+static int __init hl_mt_init(void)
+{
+	return xt_register_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
+}
+
+static void __exit hl_mt_exit(void)
+{
+	xt_unregister_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
+}
+
+module_init(hl_mt_init);
+module_exit(hl_mt_exit);