ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/package/kernel/mfp/files/Kconfig b/package/kernel/mfp/files/Kconfig
new file mode 100644
index 0000000..4ea775f
--- /dev/null
+++ b/package/kernel/mfp/files/Kconfig
@@ -0,0 +1,20 @@
+
+config MRVL_FASTPATH
+	tristate "Marvell Fastpath"
+	default m
+	depends on NF_CT_NETLINK && NF_CONNTRACK_FASTPATH
+	select NETIF_RX_FASTPATH_HOOK
+	help
+		Enable the Marvell fastpath in the application processor
+
+if !NF_CT_NETLINK || !NF_CONNTRACK_FASTPATH
+
+comment "Fastpath support disabled"
+
+comment "NF_CT_NETLINK disabled"
+	depends on !NF_CT_NETLINK
+
+comment "NF_CONNTRACK_FASTPATH disabled"
+	depends on !NF_CONNTRACK_FASTPATH
+
+endif
diff --git a/package/kernel/mfp/files/Makefile b/package/kernel/mfp/files/Makefile
new file mode 100644
index 0000000..fe5b14a
--- /dev/null
+++ b/package/kernel/mfp/files/Makefile
@@ -0,0 +1,27 @@
+ifneq ($(KERNELRELEASE),)
+
+CONFIG_MARVELL_FASTPATH = m
+obj-$(CONFIG_MARVELL_FASTPATH) := mfp.o
+
+mfp-y :=	fp_core.o fp_forward.o fp_learner_nc.o fp_classifier.o \
+		fp_database_hash.o fp_device.o fp_netlink.o fp_ndisc.o lib.a
+mfp-$(CONFIG_ASR_TOE) += fp_cm.o
+lib-y := 	fp_forward.o fp_learner_nc.o fp_classifier.o \
+		fp_database_hash.o fp_device.o fp_netlink.o \
+		fp_ndisc.o
+lib-$(CONFIG_ASR_TOE) += fp_cm.o
+
+else
+
+all:
+	@echo
+	@echo "usage:"
+	@echo "      make -C <kernel_build_dir> M=\`pwd\` ARCH=arm CROSS_COMPILE=<...> modules"
+	@echo
+	$(error)
+clean:
+	rm -f *.o .*.cmd modules.order Module.symvers mfp.ko mfp.mod.c
+	rm -rf .tmp_versions
+
+endif
+
diff --git a/package/kernel/mfp/files/fp_classifier.c b/package/kernel/mfp/files/fp_classifier.c
new file mode 100644
index 0000000..268e47a
--- /dev/null
+++ b/package/kernel/mfp/files/fp_classifier.c
@@ -0,0 +1,902 @@
+/*
+ *	Fast path Classifier
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "mfp" " classifier:%s:%d: " fmt, __func__, __LINE__
+
+#include "fp_common.h"
+#include "fp_database.h"
+#include "fp_device.h"
+#include "fp_core.h"
+
+struct fpc_stats {
+	u32 total;
+	u32 slow;
+	u32 fast;
+};
+
+static struct fpc_stats stats;
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0)
+static unsigned int udp_ct_timeout = UDP_DEFAULT_TIMEOUT;
+static unsigned int udp_ct_timeout_stream = UDP_DEFAULT_TIMEOUT_STREAM;
+static unsigned int tcp_ct_timeout = TCP_DEFAULT_TIMEOUT;
+#endif
+static int fp_acct_flag = 1;
+static int fp_ip_log_en = 255;
+static int fp_ip_log_pkt_num;
+static unsigned char *fp_ip_log_buf;
+static int fp_ip_log_index;
+#define ONE_IP_LOG_LEN	96
+
+//#define CONFIG_SET_HL_64
+
+static inline int fp_ip_decrease_ttl(struct sk_buff *skb)
+{
+	if (ip_hdr(skb)->version == 4)
+		return ip_decrease_ttl(ip_hdr(skb));
+	else
+		return --ipv6_hdr(skb)->hop_limit;
+}
+
+/* Build a conntrack tuple from the packet's L3/L4 headers */
+static inline void build_tuple(const struct sk_buff *skb,
+				struct nf_conntrack_tuple *tuple)
+{
+	int proto;
+	struct udphdr *udph;
+	/* Fill l3 info */
+	if (ip_hdr(skb)->version == 4) {
+		tuple->src.l3num = AF_INET;
+		tuple->src.u3.ip = ip_hdr(skb)->saddr;
+		tuple->dst.u3.ip = ip_hdr(skb)->daddr;
+		proto = ip_hdr(skb)->protocol;
+	} else {
+		tuple->src.l3num = AF_INET6;
+		tuple->src.u3.in6 = ipv6_hdr(skb)->saddr;
+		tuple->dst.u3.in6 = ipv6_hdr(skb)->daddr;
+		proto = ipv6_hdr(skb)->nexthdr;
+	}
+
+	/* Fill l4 info*/
+	udph = (struct udphdr *)skb_transport_header(skb);
+	tuple->dst.protonum = proto;
+	tuple->dst.u.all = udph->dest;
+	tuple->src.u.all = udph->source;
+	tuple->dst.dir = 0;
+}
+
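+/*
+ * Serialize one fixed-size log record (ONE_IP_LOG_LEN bytes) into buf.
+ * Layout, as written below: 8B tv_sec | 4B tv_nsec | 1B IP version |
+ * 1B L4 proto | 2B id | 2B len (+2B pad) | 16B saddr | 16B daddr |
+ * 2B sport | 2B dport | 4B seq | 4B ack_seq | 2B doff/flags word.
+ * IPv4 addresses occupy the first 4 bytes of their 16-byte slots.
+ */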
+static inline void log_ip_pkt(const struct sk_buff *skb, unsigned char *buf)
+{
+	struct tcphdr *tcph;
+	struct iphdr *piphdr;
+	struct ipv6hdr *pipv6hdr;
+	struct timespec64 ts;
+	piphdr = ip_hdr(skb);
+
+	ktime_get_real_ts64(&ts);
+	memcpy(buf, &ts.tv_sec, 8);
+	buf += 8;
+	memcpy(buf, &ts.tv_nsec, 4);
+	buf += 4;
+	/* Fill l3 info */
+	if (piphdr->version == 4) {
+		*buf = 4;
+		buf += 1;
+		*buf = piphdr->protocol;
+		buf += 1;
+		memcpy(buf, &piphdr->id, 2);
+		buf += 2;
+		memcpy(buf, &piphdr->tot_len, 2);
+		buf += 4;
+		memcpy(buf, &piphdr->saddr, 4);
+		buf += 16;
+		memcpy(buf, &piphdr->daddr, 4);
+		buf += 16;
+	} else {
+		pipv6hdr = ipv6_hdr(skb);
+		*buf = 6;
+		buf += 1;
+		*buf = pipv6hdr->nexthdr;
+		buf += 1;
+		*buf = 0;
+		*(buf+1) = 0;
+		buf += 2;
+		memcpy(buf, &pipv6hdr->payload_len, 2);
+		buf += 4;
+		memcpy(buf, &pipv6hdr->saddr, 16);
+		buf += 16;
+		memcpy(buf, &pipv6hdr->daddr, 16);
+		buf += 16;
+	}
+
+	/* Fill l4 info*/
+	tcph = (struct tcphdr *)skb_transport_header(skb);
+
+	memcpy(buf, &tcph->source, 2);
+	buf += 2;
+	memcpy(buf, &tcph->dest, 2);
+	buf += 2;
+
+	memcpy(buf, &tcph->seq, 4);
+	buf += 4;
+
+	memcpy(buf, &tcph->ack_seq, 4);
+	buf += 4;
+	memcpy(buf, ((char *)&tcph->ack_seq)+4, 2);
+	buf += 2;
+}
+
+/* checksum adjust (inline) */
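+/*
+ * Incremental one's-complement checksum update in the style of RFC 1624:
+ * fold the old bytes out of, and the new bytes into, the 16-bit checksum,
+ * two bytes at a time (olen/nlen must be even). A UDP checksum of zero
+ * means "no checksum" and is deliberately left untouched.
+ */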
+static inline void fpc_checksum(unsigned char *chksum,
+				  unsigned char *optr, unsigned long olen,
+				  unsigned char *nptr, unsigned long nlen,
+				  int proto)
+{
+	long x, old, neu;
+
+	if (proto == IPPROTO_UDP && *(__sum16 *)chksum == 0)
+		return;
+
+	x = chksum[0] * 256 + chksum[1];
+	x = ~x & 0xFFFF;
+	while (olen) {
+		old = optr[0] * 256 + optr[1];
+		optr += 2;
+		x -= old & 0xffff;
+		if (x <= 0) {
+			x--;
+			x &= 0xffff;
+		}
+		olen -= 2;
+	}
+
+	while (nlen) {
+		neu = nptr[0] * 256 + nptr[1];
+		nptr += 2;
+		x += neu & 0xffff;
+		if (x & 0x10000) {
+			x++;
+			x &= 0xffff;
+		}
+		nlen -= 2;
+	}
+	x = ~x & 0xFFFF;
+	chksum[0] = (unsigned char)(x / 256);
+	chksum[1] = (unsigned char)(x & 0xff);
+}
+
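+/*
+ * Prepend the cached L2 header kept in the fpdb entry, mirroring what
+ * neigh_hh_output() does on the slowpath: short headers are copied as one
+ * aligned HH_DATA_MOD-byte block, longer ones from an HH_DATA_ALIGN()ed
+ * offset. Returns nonzero if the skb lacks the required headroom.
+ */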
+static inline int fp_hard_header(struct sk_buff *skb, struct fpdb_entry *e)
+{
+	struct hh_cache *hh = &e->hh;
+	int hh_len = hh->hh_len;
+	unsigned int hh_alen = 0;
+	unsigned int headroom;
+
+	if (!hh_len)
+		return 0;
+
+	headroom = skb_headroom(skb);
+	if (likely(hh_len <= HH_DATA_MOD)) {
+		hh_alen = HH_DATA_MOD;
+
+		/* this is inlined by gcc */
+		if (likely(headroom >= HH_DATA_MOD))
+			memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+				HH_DATA_MOD);
+	} else {
+		hh_alen = HH_DATA_ALIGN(hh_len);
+
+		if (likely(headroom >= hh_alen))
+			memcpy(skb->data - hh_alen, hh->hh_data,
+				hh_alen);
+	}
+
+	if (WARN_ON_ONCE(headroom < hh_alen))
+		return 1;
+
+	skb_push(skb, hh_len);
+
+	return 0;
+}
+
+/**
+ * Refresh ct (reschedule timeout)
+ *
+ * @param skb
+ * @param el
+ * @param acct   do accounting
+ */
+static inline void fpc_refresh(struct sk_buff *skb, struct fpdb_entry *el, int acct)
+{
+	struct nf_conn *ct = el->ct;
+	const struct nf_conntrack_l4proto *l4proto;
+	enum ip_conntrack_info ctinfo = el->dir ? IP_CT_IS_REPLY : 0;
+	unsigned long extra_jiffies = 0;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
+	unsigned int *timeouts;
+#endif
+
+	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+	NF_CT_ASSERT(l4proto);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
+	if (l4proto->l4proto == IPPROTO_TCP) {
+		timeouts = nf_tcp_pernet(nf_ct_net(ct))->timeouts;
+		WARN_ON(ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED);
+		extra_jiffies = timeouts[TCP_CONNTRACK_ESTABLISHED];
+	} else if (l4proto->l4proto == IPPROTO_UDP) {
+		timeouts = nf_udp_pernet(nf_ct_net(ct))->timeouts;
+		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+			extra_jiffies =	timeouts[UDP_CT_REPLIED];
+		else
+			extra_jiffies =	timeouts[UDP_CT_UNREPLIED];
+	}
+#else
+	if (l4proto->l4proto == IPPROTO_TCP) {
+		WARN_ON(ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED);
+		extra_jiffies = tcp_ct_timeout;
+	} else if (l4proto->l4proto == IPPROTO_UDP) {
+		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+			extra_jiffies =	udp_ct_timeout_stream;
+		else
+			extra_jiffies =	udp_ct_timeout;
+	}
+#endif
+	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, acct);
+
+	fpdb_trace(el, (l4proto->l4proto == IPPROTO_TCP) ? tcp_hdr(skb) : NULL);
+}
+
+/**
+ * Modify skb as if it was forwarded by the ip stack:
+ * L2: Add MAC Header, set skb->pkt_type = PACKET_HOST
+ * L3: Decrement ttl, NAT, checksum
+ * L4: Checksum
+ *
+ * @param skb    skb to modify
+ * @param el     fpdb_entry related to this connection
+ */
+static inline int fpc_modify(struct sk_buff *skb,
+			       struct fpdb_entry *el)
+{
+	int version = ip_hdr(skb)->version;
+	int proto = (version == 4) ? ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+	struct udphdr *udph = udp_hdr(skb);
+	struct tcphdr *tcph = tcp_hdr(skb);
+
+	/**
+	 *  skb->pkt_type can be either PACKET_HOST or PACKET_OTHERHOST
+	 *  (see fpc_classify_start). We also know that this flow passed
+	 *  through slowpath (otherwise fastpath connection would not
+	 *  have been created in the first place). Therefore it is safe
+	 *  to change the pkt_type since this is what the IP Stack would
+	 *  have done.
+	 *
+	 *  Slowpath behavior:
+	 *  PACKET_OTHERHOST is set by the receiving interface if the
+	 *  dest MAC is different from its MAC address. In this case
+	 *  this means that the packet is not destined to us and is
+	 *  dropped. The only exception is if the receiving interface is
+	 *  behind a bridge. In this case, the dest MAC in packets sent
+	 *  outside the LAN is the bridge MAC address, in which case the
+	 *  bridging code sets the pkt_type to PACKET_HOST before
+	 *  routing the packet. Packets within the LAN are bridged and
+	 *  are not passed to the upper layers, and therefore don't go
+	 *  through fastpath unless CONFIG_BRIDGE_NETFILTER is enabled -
+	 *  which is the only case where fastpath "misbehaves" and sets
+	 *  the pkt_type to PACKET_HOST for bridged packets - this might
+	 *  need revision in the future.
+	 */
+	skb->pkt_type = PACKET_HOST;
+
+	if (fp_hard_header(skb, el))
+		return 1;
+
+	fp_ip_decrease_ttl(skb);
+
+	/* NAT (in case it is used by this connection) */
+	if (NF_CT_NAT(el->ct)) {
+		void *old, *new;
+		unsigned int size;
+		__sum16 *check;
+
+		/* NAT L3 ip addresses manipulation */
+		if (likely(version == 4)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->saddr = el->out_tuple.dst.u3.ip;
+			iph->daddr = el->out_tuple.src.u3.ip;
+		#ifdef CONFIG_SET_HL_64
+			iph->ttl = 64;
+		#endif
+			ip_send_check(iph); /*IPv4 checksum */
+		} else {
+			struct ipv6hdr *iph = ipv6_hdr(skb);
+			iph->saddr = el->out_tuple.dst.u3.in6;
+			iph->daddr = el->out_tuple.src.u3.in6;
+		#ifdef CONFIG_SET_HL_64
+			iph->hop_limit = 64;
+		#endif
+		}
+
+		/* Adjust transport header checksum */
+		check = (proto == IPPROTO_UDP) ? &udph->check : &tcph->check;
+		size = (version == 4) ? 4 : 16;
+		old = &el->in_tuple.src.u3.in6;
+		new = &el->out_tuple.dst.u3.in6;
+		fpc_checksum((u8 *)check, old, size, new, size, proto);
+		old = &el->in_tuple.dst.u3.in6;
+		new = &el->out_tuple.src.u3.in6;
+		fpc_checksum((u8 *)check, old, size, new, size, proto);
+
+
+		/* NAT L4 ports manipulation */
+		size = sizeof(__be16);
+		old = &el->in_tuple.dst.u.all;
+		new = &el->out_tuple.src.u.all;
+		if (*(__be16 *)old != *(__be16 *)new) {
+			udph->dest = *(__be16 *)new;
+			fpc_checksum((u8 *)check, old, size, new, size, proto);
+		}
+		old = &el->in_tuple.src.u.all;
+		new = &el->out_tuple.dst.u.all;
+		if (*(__be16 *)old != *(__be16 *)new) {
+			udph->source = *(__be16 *)new;
+			fpc_checksum((u8 *)check, old, size, new, size, proto);
+		}
+	}
+
+	return 0;
+}
+
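+/*
+ * A nonzero frag_off (fragment offset or MF set) with DF clear marks the
+ * packet as part of a fragment train, which is left to the slowpath.
+ */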
+static inline bool ipv4_is_fragmented(struct iphdr *iph)
+{
+	__be16 df = iph->frag_off & htons(IP_DF);
+	return (iph->frag_off && !df);
+}
+
+static inline int parse_headers(struct sk_buff *skb)
+{
+	int ihl, proto;
+
+	BUG_ON(!skb);
+	skb_reset_network_header(skb);
+
+	/* L3 Protocol parsing */
+	if (likely(ip_hdr(skb)->version == 4)) {
+		ihl = ip_hdr(skb)->ihl * 4;
+		proto = ip_hdr(skb)->protocol;
+
+		/*ipv4 sanity checks*/
+		if (unlikely(ihl > sizeof(struct iphdr))) {
+			pr_debug("ipv4 options in header\n");
+			return 0;
+		}
+		/* check ttl */
+		if (unlikely(ip_hdr(skb)->ttl == 1)) {
+			pr_debug("ip->ttl==1\n");
+			return 0;
+		}
+		/* check fragmentation */
+		if (unlikely(ipv4_is_fragmented(ip_hdr(skb)))) {
+			pr_debug("fragmented packet (frag_offs=%x)\n",
+				ntohs(ip_hdr(skb)->frag_off));
+			return 0;
+		}
+		/* ipv4 reassembled pkts */
+		if (unlikely(skb->data_len)) {
+			pr_debug("ipv4 reassembled pkts --> send to slowpath\n");
+			return 0;
+		}
+	} else if (likely(ip_hdr(skb)->version == 6)) {
+		ihl = sizeof(struct ipv6hdr); /* without extensions */
+		proto = ipv6_hdr(skb)->nexthdr;
+
+		/* ipv6 sanity checks */
+		if (unlikely(ipv6_hdr(skb)->hop_limit == 1)) {
+			pr_debug("ip->ttl==1 --> send to slowpath\n");
+			return 0;
+		}
+
+		/* ipv6 reassembled pkts */
+		if (unlikely(skb->data_len)) {
+			pr_debug("ipv6 reassembled pkts --> send to slowpath\n");
+			return 0;
+		}
+	} else {
+		/* Not an IP packet (neither ipv4 nor ipv6) */
+		pr_debug("not an IP packet\n");
+		return 0;
+	}
+
+	/* L4 Protocol parsing */
+	skb_set_transport_header(skb, ihl);
+
+	if (proto == IPPROTO_TCP) {
+		struct tcphdr *th = tcp_hdr(skb);
+
+		if (tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_FIN)) {
+			pr_debug("tcp rst or fin\n");
+			return 0;
+		}
+	} else if (proto != IPPROTO_UDP) {
+		pr_debug("not a TCP or UDP packet\n");
+		return 0;
+	}
+
+	return 1;
+}
+
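+/* A device is only usable if present, up and running, with carrier on. */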
+#define NETIF_INVALID(x) (!(x) || !netif_device_present(x) || \
+			   !netif_running(x) || !netif_carrier_ok(x))
+
+/**
+ * finish classification for this database entry.
+ * If skb is not NULL, it is tracked & mangled.
+ *
+ * @param skb    skb to mangle & track, or NULL if not desired
+ * @param el     fpdb_entry previously acquired by fpc_classify
+ */
+int fpc_classify_finish(struct sk_buff *skb, struct fpdb_entry *el)
+{
+	int ret = 0;
+
+	if (skb) {
+		fpc_refresh(skb, el, fp_acct_flag);
+		if (fpc_modify(skb, el)) {
+			ret = 1;
+			goto exit;
+		}
+
+		/* record last-use time; 0 means "never used", so map it to 1 */
+		el->tstamp = jiffies;
+		if (!el->tstamp)
+			el->tstamp = 1;
+	}
+exit:
+	fpdb_put(el);
+	return ret;
+}
+
+/**
+ * Classifies an skb as fast or slow, without changing the skb.
+ * Caller MUST call fpc_classify_finish to free the database entry.
+ *
+ * @param skb    skb to classify
+ *
+ * @return fpdb_entry for this skb
+ */
+struct fpdb_entry *fpc_classify_start(struct sk_buff *skb, struct nf_conntrack_tuple *tuple)
+{
+	struct fpdb_entry *el = NULL;
+	struct net_device *src, *dst;
+	int tmp_log_pkt_index;
+	unsigned char *plog_pos;
+
+	BUG_ON(!skb);
+	BUG_ON(!skb->dev); /* eth_type_trans always sets skb->dev - we count on it here */
+
+	src = skb->dev;
+	stats.total++;
+
+	if (unlikely(skb_headroom(skb) < ETH_HLEN)) {
+		pr_debug("No room for MAC header in skb\n");
+		goto slowpath;
+	}
+
+	/* source device sanity checks */
+	if (unlikely(NETIF_INVALID(src))) {
+		pr_debug("src (%s) state invalid (%lu)\n", src->name, src->state);
+		goto slowpath;
+	}
+
+	memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+	if (unlikely(!parse_headers(skb)))
+		goto slowpath;
+
+	/* Check fp_database for match */
+	build_tuple(skb, tuple);
+	if (1 == fp_ip_log_en) {
+		tmp_log_pkt_index = fp_ip_log_index++;
+		if (fp_ip_log_index > fp_ip_log_pkt_num - 50)
+			fp_ip_log_index = 0;
+
+		plog_pos = fp_ip_log_buf + tmp_log_pkt_index*ONE_IP_LOG_LEN;
+		log_ip_pkt(skb, plog_pos);
+	}
+	el = fpdb_get(tuple);
+	if (unlikely(!el))
+		goto slowpath;
+
+	if (unlikely(el->block)) {
+		pr_debug("entry blocked, send to slowpath\n");
+		goto slowpath;
+	}
+
+	if (unlikely(nf_ct_protonum(el->ct) == IPPROTO_TCP) &&
+		     el->ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) {
+		pr_debug("tcp connection state not established\n");
+		goto slowpath;
+	}
+
+	if (unlikely(el->in_dev->dev != src &&
+		     el->in_dev->br != src)) {
+		/**
+		 * Since entry can be updated (due to route changes) this case
+		 * is legal for a short period of time in which packets are
+		 * received using the old entry and transmitted using the new
+		 * one. Since we don't know whether this is the case, we
+		 * just forward these packets to the slowpath to decide what to do.
+		 */
+		pr_debug("in_dev->dev=%s(%p) != src=%s(%p)\n",
+			el->in_dev->dev->name, el->in_dev->dev, src->name, src);
+		goto slowpath;
+	}
+
+	if (unlikely(!el->in_dev->forward || !el->out_dev->forward)) {
+		pr_debug("forwarding disabled (%s forward=%d, %s forward=%d)\n",
+				el->in_dev->dev->name, el->in_dev->forward,
+				el->out_dev->dev->name, el->out_dev->forward);
+		goto slowpath;
+	}
+
+	dst = el->out_dev->dev;
+	if (unlikely(NETIF_INVALID(dst))) {
+		pr_debug("dst (%s) state invalid (%lu)\n", dst->name, dst->state);
+		goto slowpath;
+	}
+
+	if (unlikely(dst->mtu < skb->len)) {
+		pr_info_once("mtu (%d) < len (%d)\n", dst->mtu, skb->len);
+		goto slowpath;
+	}
+
+	if (unlikely(dst == src)) {
+		/* src == dst entries should be blocked; it's a bug otherwise. */
+		/* Don't dump the entry here: it takes a long time and would */
+		/* trigger an assert. yhuang 20160622 */
+		pr_err("Bug in classifier dst_dev==src_dev(%s), block=%d\n",
+		src->name, (unsigned int)el->block);
+		/* FP_ERR_DUMP_ENTRY(NULL, el); */
+		/* BUG_ON(debug_level & DBG_WARN_AS_ERR); */
+		goto slowpath;
+
+	}
+
+	if (unlikely(dst->header_ops && !el->hh.hh_len)) {
+		pr_debug("hh_cache not valid, send to slowpath\n");
+		goto slowpath;
+	}
+
+	if (unlikely(skb->pkt_type != PACKET_HOST &&
+		     skb->pkt_type != PACKET_OTHERHOST)) {
+		pr_debug("invalid skb->pkt_type(%d)\n", skb->pkt_type);
+		goto slowpath;
+	}
+
+	pr_debug("Packet from %s to %s (pkt_p %p len %d) classified as fast path\n",
+		 src->name, dst->name, skb->data, skb->len);
+	stats.fast++;
+	return el;
+
+slowpath:
+	if (el)
+		fpdb_put(el);
+	pr_debug("Packet from %s (pkt_p %p len %d) classified as slow path\n",
+		 src->name, skb->data, skb->len);
+	stats.slow++;
+	return NULL;
+
+}
+
+
+/**
+ * classify, mangle, track and hold the output device
+ * Caller MUST release the device with fp_dev_put() once finished.
+ *
+ * @param skb    skb to classify and mangle
+ *
+ * @return destination fp_net_device or NULL if classified as
+ *         slow path
+ */
+struct fp_net_device *fpc_classify(struct sk_buff *skb)
+{
+	struct fpdb_entry *el;
+	struct fp_net_device *fdev;
+	struct nf_conntrack_tuple tuple;
+
+	el = fpc_classify_start(skb, &tuple);
+	if (unlikely(!el))
+		return NULL;
+	fdev = fpdev_hold(el->out_dev);
+	if (fpc_classify_finish(skb, el)) {
+		fp_dev_put(fdev); /* drop the reference taken above */
+		return NULL;
+	}
+
+	return fdev;
+}
+
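+/*
+ * sysfs handlers below; e.g. "cat .../fp_classifier/stats" dumps the
+ * counters and any write to the same file clears them (the exact path
+ * depends on where the parent fastpath kobject is registered).
+ */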
+static ssize_t stats_show(struct fastpath_module *m, char *buf)
+{
+	int len;
+
+	len = sprintf(buf, "Fast Path Classifier statistics:\n");
+
+	len += sprintf(buf + len, "Total Classified %d ", stats.total);
+	len += sprintf(buf + len, "(Fast %d, Slow %d)\n", stats.fast, stats.slow);
+
+	return len;
+}
+
+static ssize_t stats_clear(struct fastpath_module *m, const char *buf,
+			    size_t count)
+{
+	pr_debug("reset stats...\n");
+	memset(&stats, 0, sizeof(stats));
+	return count;
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0)
+static ssize_t udp_ct_timeout_set(struct fastpath_module *m, const char *buf,
+				   size_t count)
+{
+	unsigned int sec;
+	sscanf(buf, "%u", &sec);
+	udp_ct_timeout = sec * HZ;
+	return count;
+}
+
+static ssize_t udp_ct_timeout_get(struct fastpath_module *m, char *buf)
+{
+	unsigned int sec = udp_ct_timeout / HZ;
+	return sprintf(buf, "%u\n", sec);
+}
+
+static ssize_t tcp_ct_timeout_set(struct fastpath_module *m, const char *buf,
+				   size_t count)
+{
+	unsigned int sec;
+	sscanf(buf, "%u", &sec);
+	tcp_ct_timeout = sec * HZ;
+	return count;
+}
+
+static ssize_t tcp_ct_timeout_get(struct fastpath_module *m, char *buf)
+{
+	unsigned int sec = tcp_ct_timeout / HZ;
+	return sprintf(buf, "%u\n", sec);
+}
+
+
+static FP_ATTR(udp_ct_timeout, S_IRUGO|S_IWUSR, udp_ct_timeout_get, udp_ct_timeout_set);
+static FP_ATTR(tcp_ct_timeout, S_IRUGO|S_IWUSR, tcp_ct_timeout_get, tcp_ct_timeout_set);
+#endif
+
+static ssize_t fp_acct_set(struct fastpath_module *m, const char *buf,
+				   size_t count)
+{
+	int flag;
+	sscanf(buf, "%d", &flag);
+	fp_acct_flag = flag;
+	return count;
+}
+
+static ssize_t fp_acct_get(struct fastpath_module *m, char *buf)
+{
+	int flag = fp_acct_flag;
+	return sprintf(buf, "%d\n", flag);
+}
+
+
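+/*
+ * Command interface for the raw IP logger, driven through the fp_ip_log
+ * sysfs attribute (path depends on the parent kobject), e.g.:
+ *   echo "1,1000" > fp_ip_log   # allocate 1000 records and start logging
+ *   echo "2"      > fp_ip_log   # dump the buffer to /tmp/iplog.bin
+ *   echo "3"      > fp_ip_log   # stop logging and free the buffer
+ *   echo "0"      > fp_ip_log   # stop logging, keep the buffer
+ */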
+static ssize_t fp_ip_log_set(struct fastpath_module *m, const char *buf,
+				   size_t count)
+{
+	int flag;
+	int old_flag;
+	int num;
+	int ret;
+	struct file *filep;
+	mm_segment_t old_fs;
+
+	sscanf(buf, "%d", &flag);
+	switch (flag) {
+	case 0:
+		fp_ip_log_en = flag;
+		pr_err("fp_ip_log_set: disable ip_log:fp_ip_log_index=%d to 0\n",
+			fp_ip_log_index);
+		fp_ip_log_index = 0;
+		break;
+	case 1:
+		fp_ip_log_index = 0;
+		sscanf(buf, "%d,%d", &flag, &num);
+
+		if (fp_ip_log_buf == NULL) {
+			fp_ip_log_buf = kzalloc(ONE_IP_LOG_LEN*num, GFP_KERNEL);
+			if (fp_ip_log_buf == NULL)
+				pr_err("fp_ip_log_set: %d,%d,%d, but malloc failed\n",
+				flag, num, fp_ip_log_index);
+			else
+				pr_err("fp_ip_log_set: %d,%d,%d, buf=%x, size=%d\n",
+				flag, num, fp_ip_log_index,
+				(unsigned int)fp_ip_log_buf,
+				num*ONE_IP_LOG_LEN);
+		} else {
+
+			pr_err(" fp_ip_log_set: buffer has been allocated:%d\n",
+				fp_ip_log_pkt_num);
+		}
+		fp_ip_log_pkt_num = num;
+		fp_ip_log_en = flag;
+		break;
+
+	case 2:
+		old_flag = fp_ip_log_en;
+		pr_err("fp_ip_log_set: output buf to file (/tmp/iplog.bin): old_flag=%d index=%d\n",
+			old_flag, fp_ip_log_index);
+		fp_ip_log_en = 2;
+/*Don't delete this part of code. It's for reference on data structure
+		{
+			char* pex_log_pos;
+			unsigned int* ptime_h;
+			unsigned int* ptime_l;
+			unsigned short* pver;
+			unsigned short* ppro;
+			unsigned short* plen;
+			unsigned int* psadd;
+			unsigned int* pdadd;
+			unsigned short* psport;
+			unsigned short* pdport;
+			unsigned int* pseq;
+			unsigned int* pack_seq;
+			int i;
+
+			for (i = 0; i < 2; i++) {
+				pex_log_pos = fp_ip_log_buf+i*ONE_IP_LOG_LEN;
+				ptime_h = (unsigned int*)pex_log_pos;
+				pex_log_pos +=4;
+				ptime_l = (unsigned int*)pex_log_pos;
+				pex_log_pos +=4;
+				pver = (unsigned short*)pex_log_pos;
+				pex_log_pos +=2;
+				ppro = (unsigned short*)pex_log_pos;
+				pex_log_pos +=2;
+				plen = (unsigned short*)pex_log_pos;
+				pex_log_pos +=4;
+				psadd = (unsigned int*)pex_log_pos;
+				pex_log_pos += 16;
+				pdadd = (unsigned int*) pex_log_pos;
+				pex_log_pos+=16;
+				psport = (unsigned short*) pex_log_pos;
+				pex_log_pos +=2;
+				pdport = (unsigned short*) pex_log_pos;
+				pex_log_pos+=2;
+				pseq = (unsigned int*)pex_log_pos;
+				pex_log_pos +=4;
+				pack_seq =(unsigned int*)pex_log_pos;
+
+				pr_err("Time:%x %x, ver*pro:%x, pid:%x, len:%x,
+				sadd:%x, dadd:%x, sport:%x, dport:%x,
+				seq;%x, ack_seq:%x\n",
+				*ptime_h, *ptime_l, *pver, *ppro, *plen,
+				*psadd, *pdadd, *psport, *pdport,
+				*pseq, *pack_seq);
+			}
+		}
+*/
+		filep = filp_open("/tmp/iplog.bin", O_RDWR|O_CREAT, 0644);
+		if (IS_ERR(filep)) {
+			pr_err("fp_ip_log_set: fail to open IP log file\n");
+		} else {
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			filep->f_pos = 0;
+			ret = filep->f_op->write(filep, fp_ip_log_buf,
+				ONE_IP_LOG_LEN*fp_ip_log_pkt_num,
+				&filep->f_pos);
+			set_fs(old_fs);
+			pr_err("fp_ip_log_set: write to /tmp/iplog.bin, ret=%d\n",
+				ret);
+			/* only close a file that was successfully opened */
+			filp_close(filep, NULL);
+		}
+		fp_ip_log_en = old_flag;
+		break;
+	case 3:
+		fp_ip_log_en = flag;
+		if (fp_ip_log_buf != NULL) {
+			kfree(fp_ip_log_buf);
+			pr_err("fp_ip_log_set: free the buffer\n");
+			fp_ip_log_buf = NULL;
+		} else {
+			pr_err("fp_ip_log_set: buffer is NULL\n");
+		}
+		break;
+	default:
+		fp_ip_log_en = flag;
+		pr_err("fp_ip_log_set: unsupported command %d, the log will stop\n",
+			flag);
+		break;
+	}
+	return count;
+}
+
+static ssize_t fp_ip_log_get(struct fastpath_module *m, char *buf)
+{
+	int flag = fp_ip_log_en;
+	int num = fp_ip_log_pkt_num;
+	return sprintf(buf, "%d,%d buf=%x\n",
+		flag,
+		num,
+		(unsigned int)fp_ip_log_buf);
+}
+
+
+static FP_ATTR(fp_acct_flag, S_IRUGO|S_IWUSR, fp_acct_get, fp_acct_set);
+static FP_ATTR(fp_ip_log, S_IRUGO|S_IWUSR, fp_ip_log_get, fp_ip_log_set);
+static FP_ATTR(stats, S_IRUGO|S_IWUSR, stats_show, stats_clear);
+
+static struct attribute *fp_classifier_attrs[] = {
+	&fp_attr_stats.attr,
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0)
+	&fp_attr_udp_ct_timeout.attr,
+	&fp_attr_tcp_ct_timeout.attr,
+#endif
+	&fp_attr_fp_acct_flag.attr,
+	&fp_attr_fp_ip_log.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+static void fp_classifier_release(struct kobject *kobj)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+
+	pr_debug("fp_classifier released\n");
+	kfree(module);
+}
+
+static struct kobj_type ktype_classifier = {
+	.sysfs_ops	= &fp_sysfs_ops,
+	.default_attrs	= fp_classifier_attrs,
+	.release	= fp_classifier_release,
+};
+
+static int fp_classifier_probe(struct fastpath_module *module)
+{
+	int ret;
+
+	module->priv = NULL;
+	snprintf(module->name, sizeof(module->name), "fp_classifier");
+
+	kobject_init(&module->kobj, &ktype_classifier);
+	ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+	if (ret < 0) {
+		pr_err("kobject_add failed (%d)\n", ret);
+		kobject_put(&module->kobj);
+		return ret;
+	}
+	kobject_uevent(&module->kobj, KOBJ_ADD);
+
+	pr_debug("fp_classifier probed\n");
+	return 0;
+}
+
+static int fp_classifier_remove(struct fastpath_module *module)
+{
+	kobject_put(&module->kobj);
+
+	pr_debug("fp_classifier removed\n");
+	return 0;
+}
+
+struct fastpath_module_ops fp_classifier_ops = {
+	.probe = fp_classifier_probe,
+	.remove = fp_classifier_remove,
+};
+
diff --git a/package/kernel/mfp/files/fp_classifier.h b/package/kernel/mfp/files/fp_classifier.h
new file mode 100644
index 0000000..6a64dd4
--- /dev/null
+++ b/package/kernel/mfp/files/fp_classifier.h
@@ -0,0 +1,21 @@
+#ifndef FP_CLASSIFIER_H
+#define FP_CLASSIFIER_H
+
+/*
+ *	Fast path classifier
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+/**--------------------------------------
+ * API FUNCTIONS
+ *--------------------------------------*/
+
+struct fpdb_entry *fpc_classify_start(struct sk_buff *skb, struct nf_conntrack_tuple *tuple);
+int fpc_classify_finish(struct sk_buff *skb, struct fpdb_entry *el);
+struct fp_net_device *fpc_classify(struct sk_buff *skb);
+
+#endif /* FP_CLASSIFIER_H */
diff --git a/package/kernel/mfp/files/fp_cm.c b/package/kernel/mfp/files/fp_cm.c
new file mode 100644
index 0000000..541b41a
--- /dev/null
+++ b/package/kernel/mfp/files/fp_cm.c
@@ -0,0 +1,1100 @@
+/*
+ *	Fastpath Cm Interface
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/if_vlan.h>
+#include "fp_common.h"
+#include "fp_database.h"
+#include "fp_device.h"
+#include "fp_core.h"
+#include "../linux/drivers/marvell/toev2/toe.h"
+#include "../linux/drivers/marvell/toev2/toe_464xlat.h"
+
+
+#define MAXLEN 256
+#define DEVICE_NAME_MAXSIZE 64
+#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
+
+static u32 g_cm_nlpid = -1;
+static u32 speed_thresh = 1000; /* kbps */
+
+static int fp_cm_genl_get_tuple(struct sk_buff *skb, struct genl_info *info);
+static int fp_cm_genl_del_tuple(struct sk_buff *skb, struct genl_info *info);
+static int fp_cm_genl_set_tuple(struct sk_buff *skb, struct genl_info *info);
+static int fp_cm_genl_set_pid(struct sk_buff *skb, struct genl_info *info);
+static void fp_cm_update_genl_pid(u32 pid);
+static u32 fp_cm_get_genl_pid(void);
+
+/* attribute type */
+enum fp_cm_genl_attrs {
+	CM_ATTR_UNSPEC,
+	CM_ATTR_PID,
+	CM_ATTR_SRC_IP,
+	CM_ATTR_DST_IP,
+	CM_ATTR_SRC_IP6,		/* struct in6_addr */
+	CM_ATTR_DST_IP6,		/* struct in6_addr */
+	CM_ATTR_SRC_PORT,
+	CM_ATTR_DST_PORT,
+	CM_ATTR_PROTO,
+	CM_ATTR_SRC_MAC,
+	CM_ATTR_DST_MAC,
+	CM_ATTR_SNAT,
+	CM_ATTR_FWD,
+	CM_ATTR_NAT_PORT,
+	CM_ATTR_NAT_IP,
+	CM_ATTR_DEVICE_NAME,
+	CM_ATTR_MCID,
+	CM_ATTR_RBID,
+	CM_ATTR_QFI,
+	CM_ATTR_PDU,
+	CM_ATTR_IN_PKT,
+	CM_ATTR_OUT_PKT,
+	CM_ATTR_VLAN_EN,
+	CM_ATTR_VLANID,
+	CM_ATTR_XLAT_EN,
+	CM_ATTR_XLAT_INSTANCE,
+	CM_ATTR_UPDATE_TUPLE,
+/* private: internal use only */
+	__FP_CM_ATTR_AFTER_LAST
+};
+#define FP_CM_ATTR_MAX (__FP_CM_ATTR_AFTER_LAST - 1)
+
+/* commands */
+enum fp_cm_commands {
+	CM_CMD_UNSPEC,
+	CM_CMD_SET_PID,
+	CM_CMD_GET_TUPLE,
+	CM_CMD_SET_TUPLE,
+	CM_CMD_DEL_TUPLE,
+	__FP_CM_CMD_AFTER_LAST,
+};
+#define FP_CM_CMD_MAX (__FP_CM_CMD_AFTER_LAST - 1)
+
+#define ETH_TYPE_LEN		2
+#define FP_CM_NLMSG_DEFAULT_SIZE  256
+
+
+/* attribute policy */
+static struct nla_policy fp_cm_genl_policy[FP_CM_ATTR_MAX + 1] = {
+	[CM_ATTR_PID] = { .type = NLA_U32 },
+	[CM_ATTR_SRC_IP] = { .type = NLA_U32 },
+	[CM_ATTR_DST_IP] = { .type = NLA_U32 },
+	[CM_ATTR_SRC_IP6] = {
+		.type = NLA_BINARY,
+		.len = sizeof(struct in6_addr),
+	},
+	[CM_ATTR_DST_IP6] = {
+		.type = NLA_BINARY,
+		.len = sizeof(struct in6_addr),
+	},
+	[CM_ATTR_SRC_PORT] = { .type = NLA_U16 },
+	[CM_ATTR_DST_PORT] = { .type = NLA_U16 },
+	[CM_ATTR_PROTO] = { .type = NLA_U8 },
+	[CM_ATTR_SNAT] = { .type = NLA_U8 },
+	[CM_ATTR_FWD] = { .type = NLA_U8 },
+	[CM_ATTR_NAT_PORT] = { .type = NLA_U16 },
+	[CM_ATTR_NAT_IP] = { .type = NLA_U32 },
+	[CM_ATTR_SRC_MAC] = { .type = NLA_STRING},
+	[CM_ATTR_DST_MAC] = { .type = NLA_STRING},
+	[CM_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
+			.len = DEVICE_NAME_MAXSIZE },
+	[CM_ATTR_MCID] = { .type = NLA_U8 },
+	[CM_ATTR_RBID] = { .type = NLA_U8 },
+	[CM_ATTR_QFI] = { .type = NLA_U8 },
+	[CM_ATTR_PDU] = { .type = NLA_U8 },
+	[CM_ATTR_IN_PKT] = { .type = NLA_U8 },
+	[CM_ATTR_OUT_PKT] = { .type = NLA_U8 },
+	[CM_ATTR_VLAN_EN] = { .type = NLA_U8 },
+	[CM_ATTR_VLANID] = { .type = NLA_U16 },
+	[CM_ATTR_XLAT_EN] = { .type = NLA_U32 },
+	[CM_ATTR_XLAT_INSTANCE] = { .type = NLA_STRING },
+	[CM_ATTR_UPDATE_TUPLE] = { .type = NLA_U8 },
+};
+
+/* operation definition */
+struct genl_ops fp_cm_genl_ops[] = {
+	{
+		.cmd = CM_CMD_SET_PID,
+		.flags = 0,
+		.doit = fp_cm_genl_set_pid,
+	},
+	{
+		.cmd = CM_CMD_GET_TUPLE,
+		.flags = 0,
+		.doit = fp_cm_genl_get_tuple,
+	},
+	{
+		.cmd = CM_CMD_SET_TUPLE,
+		.flags = 0,
+		.doit = fp_cm_genl_set_tuple,
+	},
+	{
+		.cmd = CM_CMD_DEL_TUPLE,
+		.flags = 0,
+		.doit = fp_cm_genl_del_tuple,
+	}
+};
+
+static struct genl_family fp_cm_genl_family = {
+	.hdrsize = 0,
+	.name = "fp_cm",
+	.version = 1,
+	.maxattr = FP_CM_ATTR_MAX,
+	.policy = fp_cm_genl_policy,
+	.ops	 = fp_cm_genl_ops,
+	.n_ops	 = ARRAY_SIZE(fp_cm_genl_ops),
+};
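+/*
+ * Control flow implied by the handlers below: the userspace connection
+ * manager first sends CM_CMD_SET_PID carrying its netlink port id, after
+ * which fp_cm_genl_send_tuple() unicasts CM_CMD_GET_TUPLE (add) and
+ * CM_CMD_DEL_TUPLE (delete) notifications to that port id.
+ */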
+
+static void fp_cm_update_genl_pid(u32 pid)
+{
+	g_cm_nlpid = pid;
+}
+
+static u32 fp_cm_get_genl_pid(void)
+{
+	return g_cm_nlpid;
+}
+
+static int fp_cm_genl_set_pid(struct sk_buff *skb, struct genl_info *info)
+{
+	u32 pid;
+
+	if (!info->attrs[CM_ATTR_PID])
+		return -EINVAL;
+
+	pid = nla_get_u32(info->attrs[CM_ATTR_PID]);
+	pr_info("%s: got cm pid %u\n", __func__, pid);
+	fp_cm_update_genl_pid(pid);
+	return 0;
+}
+
+static int __fp_cm_genl_fill_tuple_info(struct sk_buff *msg, struct nf_conntrack_tuple *tuple,
+				struct fpdb_entry *el, u32 portid, u32 seq, int flags, int add)
+{
+	void *hdr;
+	struct hh_cache *hh;
+	int hh_len;
+	u8 proto = 0, in_pkt = 0, out_pkt = 0, fwd = 0, nat = 0;
+	u16 nat_port = 0;
+	u32 nat_ip = 0;
+	char src_mac[ETH_ALEN]={0}, dst_mac[ETH_ALEN]={0};
+	struct fp_net_device *dst, *src;
+	struct vlan_dev_priv *vlan;
+	struct net_device *src_dev, *dst_dev;
+
+	hh = &el->hh;
+	hh_len = hh->hh_len;
+
+	if (add)
+		hdr = genlmsg_put(msg, portid, seq, &fp_cm_genl_family, flags,
+				  CM_CMD_GET_TUPLE);
+	else
+		hdr = genlmsg_put(msg, portid, seq, &fp_cm_genl_family, flags,
+				  CM_CMD_DEL_TUPLE);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (tuple->src.l3num == AF_INET) {
+		if (nla_put_u32(msg, CM_ATTR_SRC_IP, tuple->src.u3.ip) ||
+			nla_put_u32(msg, CM_ATTR_DST_IP, tuple->dst.u3.ip))
+			goto nla_put_failure;
+	} else if (tuple->src.l3num == AF_INET6) {
+		if (nla_put(msg, CM_ATTR_SRC_IP6, sizeof(struct in6_addr), &tuple->src.u3.in6) ||
+			nla_put(msg, CM_ATTR_DST_IP6, sizeof(struct in6_addr), &tuple->dst.u3.in6))
+			goto nla_put_failure;
+	}
+
+	if (tuple->dst.protonum == IPPROTO_UDP)
+		proto = TOE_UDP;
+	else if (tuple->dst.protonum == IPPROTO_TCP)
+		proto = TOE_TCP;
+	else
+		proto = TOE_MAX;
+
+	if (nla_put_u16(msg, CM_ATTR_SRC_PORT, ntohs(tuple->src.u.all)) ||
+		nla_put_u16(msg, CM_ATTR_DST_PORT, ntohs(tuple->dst.u.all)) ||
+		nla_put_u8(msg, CM_ATTR_PROTO, proto))
+		goto nla_put_failure;
+
+	src = rcu_dereference_bh(el->in_dev);
+	dst = rcu_dereference_bh(el->out_dev);
+
+	if (is_vlan_dev(src->dev)) {
+		vlan = vlan_dev_priv(src->dev);
+		src_dev = vlan->real_dev;
+		nla_put_u8(msg, CM_ATTR_VLAN_EN, 1);
+		nla_put_u16(msg, CM_ATTR_VLANID, vlan->vlan_id);
+	} else
+		src_dev = src->dev;
+
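+	/* Classify endpoints by interface-name prefix: ccinet = CP PDU path,
+	 * usbnet = USB tether, wlan = Wi-Fi, eth = wired Ethernet; anything
+	 * else terminates on the AP itself. */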
+	if (!strncasecmp(src_dev->name, "ccinet", 6))
+		in_pkt = PDU_PKT;
+	else if (!strncasecmp(src_dev->name, "usbnet", 6))
+		in_pkt = USB_PKT;
+	else if (!strncasecmp(src_dev->name, "wlan", 4))
+		in_pkt = WIFI_PKT;
+	else if (!strncasecmp(src_dev->name, "eth", 3))
+		in_pkt = ETH_PKT;
+	else
+		in_pkt = AP_PKT;
+
+	if (is_vlan_dev(dst->dev)) {
+		vlan = vlan_dev_priv(dst->dev);
+		dst_dev = vlan->real_dev;
+		nla_put_u8(msg, CM_ATTR_VLAN_EN, 1);
+		nla_put_u16(msg, CM_ATTR_VLANID, vlan->vlan_id);
+	} else
+		dst_dev = dst->dev;
+
+	if (!strncasecmp(dst_dev->name, "ccinet", 6))
+		out_pkt = PDU_PKT;
+	else if (!strncasecmp(dst_dev->name, "usbnet", 6))
+		out_pkt = USB_PKT;
+	else if (!strncasecmp(dst_dev->name, "wlan", 4))
+		out_pkt = WIFI_PKT;
+	else if (!strncasecmp(dst_dev->name, "eth", 3))
+		out_pkt = ETH_PKT;
+	else
+		out_pkt = AP_PKT;
+
+	fwd = (in_pkt != AP_PKT) && (out_pkt != AP_PKT);
+	if (fwd && (tuple->src.l3num == AF_INET)) {
+		if (in_pkt == PDU_PKT && (out_pkt == USB_PKT || out_pkt == WIFI_PKT || out_pkt == ETH_PKT)) {
+			nat = 1;
+			nat_ip = el->out_tuple.src.u3.ip;
+			nat_port = ntohs(el->out_tuple.src.u.all);
+		} else if ((in_pkt == USB_PKT || in_pkt == WIFI_PKT || in_pkt == ETH_PKT) && out_pkt == PDU_PKT) {
+			nat = 1;
+			nat_ip = el->out_tuple.dst.u3.ip;
+			nat_port = ntohs(el->out_tuple.dst.u.all);
+		} else
+			/* CP->Wi-Fi / Wi-Fi->CP through the TOE needs no NAT */
+			nat = 0;
+	}
+
+	if (nla_put_u8(msg, CM_ATTR_IN_PKT, in_pkt) ||
+		nla_put_u8(msg, CM_ATTR_OUT_PKT, out_pkt) ||
+		nla_put_u8(msg, CM_ATTR_FWD, fwd) ||
+		nla_put_string(msg, CM_ATTR_DEVICE_NAME, dst->dev->name))
+		goto nla_put_failure;
+
+	if (tuple->src.l3num == AF_INET) {
+		if (nla_put_u8(msg, CM_ATTR_SNAT, nat) ||
+			nla_put_u16(msg, CM_ATTR_NAT_PORT, nat_port) ||
+			nla_put_u32(msg, CM_ATTR_NAT_IP, nat_ip))
+			goto nla_put_failure;
+	}
+
+	if (hh_len) {
+		if (likely(hh_len <= HH_DATA_MOD)) {
+			/* this is inlined by gcc */
+			char mac_header[HH_DATA_MOD];
+			memcpy(mac_header, hh->hh_data, HH_DATA_MOD);
+			memcpy(src_mac, &mac_header[HH_DATA_MOD-ETH_TYPE_LEN-ETH_ALEN], ETH_ALEN);
+			memcpy(dst_mac, &mac_header[HH_DATA_MOD-ETH_TYPE_LEN-ETH_ALEN*2], ETH_ALEN);
+		} else {
+			int hh_alen = HH_DATA_ALIGN(hh_len);
+			char *mac_header = kmalloc(hh_alen, GFP_ATOMIC);
+
+			if (!mac_header)
+				goto nla_put_failure;
+			memcpy(mac_header, hh->hh_data, hh_alen);
+			memcpy(src_mac, mac_header+(hh_alen-ETH_TYPE_LEN-ETH_ALEN), ETH_ALEN);
+			memcpy(dst_mac, mac_header+(hh_alen-ETH_TYPE_LEN-ETH_ALEN*2), ETH_ALEN);
+			kfree(mac_header);
+		}
+	}
+
+	if (nla_put(msg, CM_ATTR_SRC_MAC, ETH_ALEN, src_mac) ||
+		nla_put(msg, CM_ATTR_DST_MAC, ETH_ALEN, dst_mac))
+			goto nla_put_failure;
+
+	pr_debug("%s:\n in:%d, out:%d\n src_ip:0x%x\n dst_ip:0x%x\n src_port:%d\n dst_port:%d\n"
+			" protocol:%d\n nat_port:%d\n nat_ip:0x%x\n fwd:%d\n snat:%d\n",
+			__func__, in_pkt, out_pkt, ntohl(tuple->src.u3.ip), ntohl(tuple->dst.u3.ip), ntohs(tuple->src.u.all),
+			ntohs(tuple->dst.u.all), proto, nat_port, ntohl(nat_ip), fwd, nat);
+
+	genlmsg_end(msg, hdr);
+	if (add)
+		el->nl_flag = 1;
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int __fp_cm_genl_fill_464xlat_info(struct sk_buff *msg, struct nf_conntrack_tuple *tuple,
+				struct fpdb_entry *el, u32 portid, u32 seq, int flags, int add)
+{
+	void *hdr;
+	struct hh_cache *hh;
+	int hh_len;
+	u8 proto = 0, in_pkt = 0, out_pkt = 0, fwd = 0, nat = 0;
+	u16 nat_port = 0;
+	u32 nat_ip = 0;
+	char src_mac[ETH_ALEN]={0}, dst_mac[ETH_ALEN]={0};
+	struct fp_net_device *dst, *src;
+	struct vlan_dev_priv *vlan;
+	struct net_device *src_dev, *dst_dev;
+	nat46_instance_t *nat46;
+	nat46_netdev_priv_t *dev_priv;
+
+	hh = &el->hh;
+	hh_len = hh->hh_len;
+
+	if (tuple->src.l3num == AF_INET6) {
+		el->nl_flag = 1;
+		return 0;
+	}
+
+	if (add)
+		hdr = genlmsg_put(msg, portid, seq, &fp_cm_genl_family, flags,
+				  CM_CMD_GET_TUPLE);
+	else
+		hdr = genlmsg_put(msg, portid, seq, &fp_cm_genl_family, flags,
+				  CM_CMD_DEL_TUPLE);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (tuple->src.l3num == AF_INET) {
+		if (nla_put_u32(msg, CM_ATTR_SRC_IP, tuple->src.u3.ip) ||
+			nla_put_u32(msg, CM_ATTR_DST_IP, tuple->dst.u3.ip))
+		goto nla_put_failure;
+	}
+
+	if (tuple->dst.protonum == IPPROTO_UDP)
+		proto = TOE_UDP;
+	else if (tuple->dst.protonum == IPPROTO_TCP)
+		proto = TOE_TCP;
+	else
+		proto = TOE_MAX;
+
+	if (nla_put_u16(msg, CM_ATTR_SRC_PORT, ntohs(tuple->src.u.all)) ||
+		nla_put_u16(msg, CM_ATTR_DST_PORT, ntohs(tuple->dst.u.all)) ||
+		nla_put_u8(msg, CM_ATTR_PROTO, proto))
+		goto nla_put_failure;
+
+	src = rcu_dereference_bh(el->in_dev);
+	dst = rcu_dereference_bh(el->out_dev);
+
+	if (is_vlan_dev(src->dev)) {
+		vlan = vlan_dev_priv(src->dev);
+		src_dev = vlan->real_dev;
+		nla_put_u8(msg, CM_ATTR_VLAN_EN, 1);
+		nla_put_u16(msg, CM_ATTR_VLANID, vlan->vlan_id);
+	} else
+		src_dev = src->dev;
+
+	if (is_nat46_dev(src_dev) && is_valid_nat46_instance(src_dev)) {
+		//RX:
+		in_pkt = PDU_PKT;
+		dev_priv = netdev_priv(src_dev);
+		nat46 = dev_priv->nat46;
+		pr_debug("%s:\nDL xlat enable\n, src:%pI6c, dst:%pI6c\n", __func__, nat46->pairs[0].remote.v6_pref.s6_addr32,
+				nat46->pairs[0].local.v6_pref.s6_addr32);
+
+		if (nla_put_u32(msg, CM_ATTR_XLAT_EN, 1) ||
+			nla_put(msg, CM_ATTR_SRC_IP6, sizeof(struct in6_addr), &nat46->pairs[0].remote.v6_pref) ||
+			nla_put(msg, CM_ATTR_DST_IP6, sizeof(struct in6_addr), &nat46->pairs[0].local.v6_pref))
+			goto nla_put_failure;
+	} else if (!strncasecmp(src_dev->name, "ccinet", 6))
+		in_pkt = PDU_PKT;
+	else if (!strncasecmp(src_dev->name, "usbnet", 6))
+		in_pkt = USB_PKT;
+	else if (!strncasecmp(src_dev->name, "wlan", 4))
+		in_pkt = WIFI_PKT;
+	else if (!strncasecmp(src_dev->name, "eth", 3))
+		in_pkt = ETH_PKT;
+	else
+		in_pkt = AP_PKT;
+
+	if (is_vlan_dev(dst->dev)) {
+		vlan = vlan_dev_priv(dst->dev);
+		dst_dev = vlan->real_dev;
+		nla_put_u8(msg, CM_ATTR_VLAN_EN, 1);
+		nla_put_u16(msg, CM_ATTR_VLANID, vlan->vlan_id);
+	} else
+		dst_dev = dst->dev;
+
+	if (is_nat46_dev(dst_dev) && is_valid_nat46_instance(dst_dev)) {
+		//TX
+		out_pkt = PDU_PKT;
+		dev_priv = netdev_priv(dst_dev);
+		nat46 = dev_priv->nat46;
+		pr_debug("%s:\nUL xlat enable\n, xlat instance: %s, src:%pI6c, dst:%pI6c\n", __func__, dst_dev->name,
+				nat46->pairs[0].local.v6_pref.s6_addr32, nat46->pairs[0].remote.v6_pref.s6_addr32);
+
+		if (nla_put_u32(msg, CM_ATTR_XLAT_EN, 1) ||
+			nla_put_string(msg, CM_ATTR_XLAT_INSTANCE, dst_dev->name) ||
+			nla_put(msg, CM_ATTR_SRC_IP6, sizeof(struct in6_addr), &nat46->pairs[0].local.v6_pref) ||
+			nla_put(msg, CM_ATTR_DST_IP6, sizeof(struct in6_addr), &nat46->pairs[0].remote.v6_pref))
+			goto nla_put_failure;
+	} else if (!strncasecmp(dst_dev->name, "ccinet", 6))
+		out_pkt = PDU_PKT;
+	else if (!strncasecmp(dst_dev->name, "usbnet", 6))
+		out_pkt = USB_PKT;
+	else if (!strncasecmp(dst_dev->name, "wlan", 4))
+		out_pkt = WIFI_PKT;
+	else if (!strncasecmp(dst_dev->name, "eth", 3))
+		out_pkt = ETH_PKT;
+	else
+		out_pkt = AP_PKT;
+
+	fwd = (in_pkt != AP_PKT) && (out_pkt != AP_PKT);
+	if (fwd && (tuple->src.l3num == AF_INET)) {
+		if (in_pkt == PDU_PKT && (out_pkt == USB_PKT || out_pkt == WIFI_PKT || out_pkt == ETH_PKT)) {
+			nat = 1;
+			nat_ip = el->out_tuple.src.u3.ip;
+			nat_port = ntohs(el->out_tuple.src.u.all);
+		} else if ((in_pkt == USB_PKT || in_pkt == WIFI_PKT || in_pkt == ETH_PKT) && out_pkt == PDU_PKT) {
+			nat = 1;
+			nat_ip = el->out_tuple.dst.u3.ip;
+			nat_port = ntohs(el->out_tuple.dst.u.all);
+		} else
+			/* not support*/
+			goto nla_put_failure;
+	}
+
+	if (nla_put_u8(msg, CM_ATTR_IN_PKT, in_pkt) ||
+		nla_put_u8(msg, CM_ATTR_OUT_PKT, out_pkt) ||
+		nla_put_u8(msg, CM_ATTR_FWD, fwd) ||
+		nla_put_string(msg, CM_ATTR_DEVICE_NAME, dst_dev->name))
+		goto nla_put_failure;
+
+	if (tuple->src.l3num == AF_INET) {
+		if (nla_put_u8(msg, CM_ATTR_SNAT, nat) ||
+			nla_put_u16(msg, CM_ATTR_NAT_PORT, nat_port) ||
+			nla_put_u32(msg, CM_ATTR_NAT_IP, nat_ip))
+			goto nla_put_failure;
+	}
+
+	if (hh_len) {
+		if (likely(hh_len <= HH_DATA_MOD)) {
+			/* this is inlined by gcc */
+			char mac_header[HH_DATA_MOD];
+			memcpy(mac_header, hh->hh_data, HH_DATA_MOD);
+			memcpy(src_mac, &mac_header[HH_DATA_MOD-ETH_TYPE_LEN-ETH_ALEN], ETH_ALEN);
+			memcpy(dst_mac, &mac_header[HH_DATA_MOD-ETH_TYPE_LEN-ETH_ALEN*2], ETH_ALEN);
+		} else {
+			int hh_alen = HH_DATA_ALIGN(hh_len);
+			char *mac_header = kmalloc(hh_alen, GFP_ATOMIC);
+
+			if (!mac_header)
+				goto nla_put_failure;
+			memcpy(mac_header, hh->hh_data, hh_alen);
+			memcpy(src_mac, mac_header+(hh_alen-ETH_TYPE_LEN-ETH_ALEN), ETH_ALEN);
+			memcpy(dst_mac, mac_header+(hh_alen-ETH_TYPE_LEN-ETH_ALEN*2), ETH_ALEN);
+			kfree(mac_header);
+		}
+	}
+
+	if (nla_put(msg, CM_ATTR_SRC_MAC, ETH_ALEN, src_mac) ||
+		nla_put(msg, CM_ATTR_DST_MAC, ETH_ALEN, dst_mac))
+			goto nla_put_failure;
+
+	pr_debug("%s:\n in:%d, out:%d\n src_ip:0x%x\n dst_ip:0x%x\n src_port:%d\n dst_port:%d\n"
+			" protocol:%d\n nat_port:%d\n nat_ip:0x%x\n fwd:%d\n snat:%d\n\n",
+			__func__, in_pkt, out_pkt, ntohl(tuple->src.u3.ip), ntohl(tuple->dst.u3.ip), ntohs(tuple->src.u.all),
+			ntohs(tuple->dst.u.all), proto, nat_port, ntohl(nat_ip), fwd, nat);
+
+	genlmsg_end(msg, hdr);
+	el->nl_flag = 1;
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int fp_cm_genl_fill_tuple_info(struct sk_buff *msg, struct nf_conntrack_tuple *tuple,
+				struct fpdb_entry *el, u32 portid, int add)
+{
+	struct fp_net_device *dst, *src;
+
+	if (unlikely(!tuple) || unlikely(!el))
+		return -EMSGSIZE;
+
+	src = rcu_dereference_bh(el->in_dev);
+	dst = rcu_dereference_bh(el->out_dev);
+	if (!src || !dst)
+		return -EMSGSIZE;
+
+	if (is_nat46_dev(src->dev) || is_nat46_dev(dst->dev)) {
+		return __fp_cm_genl_fill_464xlat_info(msg, tuple, el, portid, 0, 0, add);
+	} else
+		return __fp_cm_genl_fill_tuple_info(msg, tuple, el, portid, 0, 0, add);
+}
+
+static int fp_cm_genl_fill_tuple_info_for_test(struct sk_buff *msg, u32 portid, u32 seq,
+				int flags, int add)
+{
+	void *hdr;
+	struct in6_addr addr;
+	char mac[ETH_ALEN] = {0x0, 0x2, 0x3, 0x4, 0x5, 0x6};
+
+	if (add)
+		hdr = genlmsg_put(msg, portid, seq, &fp_cm_genl_family, flags,
+				  CM_CMD_GET_TUPLE);
+	else
+		hdr = genlmsg_put(msg, portid, seq, &fp_cm_genl_family, flags,
+				  CM_CMD_DEL_TUPLE);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	memset(&addr.s6_addr, 6, sizeof(struct in6_addr));
+
+	if (nla_put_u32(msg, CM_ATTR_SRC_IP, 0xC0A80101) ||
+		nla_put_u32(msg, CM_ATTR_DST_IP, 0xC0A80102) ||
+		/* nla_put(msg, CM_ATTR_SRC_IP6, sizeof(struct in6_addr), &addr) ||
+		nla_put(msg, CM_ATTR_DST_IP6, sizeof(struct in6_addr), &addr) || */
+		nla_put_u16(msg, CM_ATTR_SRC_PORT, 0x64) ||
+		nla_put_u16(msg, CM_ATTR_DST_PORT, 0xC8) ||
+		nla_put_u8(msg, CM_ATTR_PROTO, TOE_TCP) ||
+		nla_put_u16(msg, CM_ATTR_NAT_PORT, 0x64) ||
+		nla_put_u32(msg, CM_ATTR_NAT_IP, 0xC0A8010A) ||
+		nla_put(msg, CM_ATTR_SRC_MAC, ETH_ALEN, mac) ||
+		nla_put(msg, CM_ATTR_DST_MAC, ETH_ALEN, mac) ||
+		nla_put_string(msg, CM_ATTR_DEVICE_NAME, "ccinet0") ||
+		nla_put_u8(msg, CM_ATTR_IN_PKT, PDU_PKT) ||
+		nla_put_u8(msg, CM_ATTR_OUT_PKT, USB_PKT) ||
+		nla_put_u8(msg, CM_ATTR_FWD, 1) ||
+		nla_put_u8(msg, CM_ATTR_SNAT, 1) ||
+		nla_put_u8(msg, CM_ATTR_VLAN_EN, 1) ||
+		nla_put_u16(msg, CM_ATTR_VLANID, 0x64))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int fp_cm_genl_set_tuple(struct sk_buff *skb, struct genl_info *info)
+{
+	struct toe_tuple_buff toe_tuple, toe_tuple_tmp;
+	char dev_name[DEVICE_NAME_MAXSIZE] = {0};
+	char src_mac[ETH_ALEN]={0}, dst_mac[ETH_ALEN]={0};
+	char xlat_instance[16] = {0};
+	struct in6_addr *src_ip6 = NULL;
+	struct in6_addr *dst_ip6 = NULL;
+	/* all attributes are optional, so default everything to zero */
+	u32 src_ip = 0, dst_ip = 0, nat_ip = 0, xlat_en = 0;
+	u16 src_port = 0, dst_port = 0, nat_port = 0, vlanid = 0;
+	u8 rx_tx, prot = 0, fwd = 0, nat = 0, in_pkt = 0, out_pkt = 0;
+	u8 pdu = 0, qfi = 0, rbid = 0, mcid = 0, vlan_en = 0;
+	u8 update = 0;
+
+	if (!info->attrs[CM_ATTR_MCID] ||
+		!info->attrs[CM_ATTR_RBID] ||
+		!info->attrs[CM_ATTR_QFI] ||
+		!info->attrs[CM_ATTR_PDU])
+		return -EINVAL;
+
+	memset(&toe_tuple, 0, sizeof(struct toe_tuple_buff));
+	memset(&toe_tuple_tmp, 0, sizeof(struct toe_tuple_buff));
+
+	if (info->attrs[CM_ATTR_SRC_IP])
+		src_ip = nla_get_u32(info->attrs[CM_ATTR_SRC_IP]);
+
+	if (info->attrs[CM_ATTR_DST_IP])
+		dst_ip = nla_get_u32(info->attrs[CM_ATTR_DST_IP]);
+
+	if (info->attrs[CM_ATTR_SRC_IP6]) {
+		src_ip6 = nla_data(info->attrs[CM_ATTR_SRC_IP6]);
+		pr_debug("%s, src_ip6=%pI6c\n", __func__, src_ip6->s6_addr32);
+	}
+
+	if (info->attrs[CM_ATTR_DST_IP6]) {
+		dst_ip6 = nla_data(info->attrs[CM_ATTR_DST_IP6]);
+		pr_debug("%s, dst_ip6=%pI6c\n", __func__, dst_ip6->s6_addr32);
+	}
+
+	if (info->attrs[CM_ATTR_SRC_PORT])
+		src_port = nla_get_u16(info->attrs[CM_ATTR_SRC_PORT]);
+
+	if (info->attrs[CM_ATTR_DST_PORT])
+		dst_port = nla_get_u16(info->attrs[CM_ATTR_DST_PORT]);
+
+	if (info->attrs[CM_ATTR_PROTO])
+		prot = nla_get_u8(info->attrs[CM_ATTR_PROTO]);
+
+	if (info->attrs[CM_ATTR_SRC_MAC]) {
+		memcpy(src_mac, nla_data(info->attrs[CM_ATTR_SRC_MAC]), ETH_ALEN);
+		pr_debug("%s, src_mac: %02x%02x-%02x%02x-%02x%02x\n", __func__,
+			src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5]);
+	}
+	if (info->attrs[CM_ATTR_DST_MAC]) {
+		memcpy(dst_mac, nla_data(info->attrs[CM_ATTR_DST_MAC]), ETH_ALEN);
+		pr_debug("%s, dst_mac: %02x%02x-%02x%02x-%02x%02x\n", __func__,
+			dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], dst_mac[4], dst_mac[5]);
+	}
+
+	if (info->attrs[CM_ATTR_SNAT])
+		nat = nla_get_u8(info->attrs[CM_ATTR_SNAT]);
+
+	if (info->attrs[CM_ATTR_FWD])
+		fwd = nla_get_u8(info->attrs[CM_ATTR_FWD]);
+
+	if (info->attrs[CM_ATTR_NAT_PORT])
+		nat_port = nla_get_u16(info->attrs[CM_ATTR_NAT_PORT]);
+
+	if (info->attrs[CM_ATTR_NAT_IP])
+		nat_ip = nla_get_u32(info->attrs[CM_ATTR_NAT_IP]);
+
+	if (info->attrs[CM_ATTR_DEVICE_NAME]) {
+		/*nla_len = strlen(dev_name) + 1 + NLA_HDRLEN;*/
+		memcpy(dev_name, (char *)nla_data(info->attrs[CM_ATTR_DEVICE_NAME]),
+					info->attrs[CM_ATTR_DEVICE_NAME]->nla_len - NLA_HDRLEN -1);
+		pr_debug("%s, dev_name: %s\n", __func__, dev_name);
+	}
+
+	if (info->attrs[CM_ATTR_MCID])
+		mcid = nla_get_u8(info->attrs[CM_ATTR_MCID]);
+
+	if (info->attrs[CM_ATTR_RBID])
+		rbid = nla_get_u8(info->attrs[CM_ATTR_RBID]);
+
+	if (info->attrs[CM_ATTR_QFI])
+		qfi = nla_get_u8(info->attrs[CM_ATTR_QFI]);
+
+	if (info->attrs[CM_ATTR_PDU])
+		pdu = nla_get_u8(info->attrs[CM_ATTR_PDU]);
+
+	if (info->attrs[CM_ATTR_IN_PKT])
+		in_pkt = nla_get_u8(info->attrs[CM_ATTR_IN_PKT]);
+
+	if (info->attrs[CM_ATTR_OUT_PKT])
+		out_pkt = nla_get_u8(info->attrs[CM_ATTR_OUT_PKT]);
+
+	if (info->attrs[CM_ATTR_VLAN_EN])
+		vlan_en = nla_get_u8(info->attrs[CM_ATTR_VLAN_EN]);
+
+	if (info->attrs[CM_ATTR_VLANID])
+		vlanid = nla_get_u16(info->attrs[CM_ATTR_VLANID]);
+
+	if (info->attrs[CM_ATTR_XLAT_EN])
+		xlat_en = nla_get_u32(info->attrs[CM_ATTR_XLAT_EN]);
+
+	if (info->attrs[CM_ATTR_XLAT_INSTANCE]) {
+		memcpy(xlat_instance, (char *)nla_data(info->attrs[CM_ATTR_XLAT_INSTANCE]),
+					info->attrs[CM_ATTR_XLAT_INSTANCE]->nla_len - NLA_HDRLEN -1);
+		pr_debug("%s, xlat_instance: %s\n", __func__, xlat_instance);
+	}
+
+	if (info->attrs[CM_ATTR_UPDATE_TUPLE])
+		update = nla_get_u8(info->attrs[CM_ATTR_UPDATE_TUPLE]);
+
+	/* rx: cp -> ap, usb, wifi */
+	if (in_pkt == PDU_PKT)
+		rx_tx = 1;
+	/* rx: ap -> usb, ap -> wifi */
+	else if ((in_pkt == AP_PKT) && (out_pkt != PDU_PKT))
+		rx_tx = 1;
+	/*
+	 * tx:
+	 * ap -> cp
+	 * usb/wifi -> ap/cp */
+	else
+		rx_tx = 0;
+
+	if (src_ip6 && dst_ip6 && !xlat_en) {
+		memcpy(toe_tuple.src_ip6, src_ip6->s6_addr32, sizeof(toe_tuple.src_ip6));
+		memcpy(toe_tuple.dst_ip6, dst_ip6->s6_addr32, sizeof(toe_tuple.dst_ip6));
+		toe_tuple.ip6 = 1;
+	} else {
+		toe_tuple.src_ip = ntohl(src_ip);
+		toe_tuple.dst_ip = ntohl(dst_ip);
+		toe_tuple.ip6 = 0;
+		toe_tuple.nat = nat;
+		toe_tuple.nat_port = nat_port;
+		toe_tuple.nat_ip = ntohl(nat_ip);
+	}
+
+	if (vlan_en) {
+		toe_tuple.vlan_en = vlan_en;
+		toe_tuple.vlanid = vlanid;
+	}
+
+	toe_tuple.src_port = src_port;
+	toe_tuple.dst_port = dst_port;
+	toe_tuple.prot = prot;
+	toe_tuple.urg = 0;
+	toe_tuple.fwd = fwd;
+	toe_tuple.crc = 1;
+	toe_tuple.rxtx = rx_tx;
+	toe_tuple.out_pkt = out_pkt;
+	toe_tuple.pdu = pdu;
+	toe_tuple.qfi = qfi;
+	toe_tuple.rbid = rbid;
+	toe_tuple.mcid = mcid;
+	toe_tuple.xlat_en = xlat_en;
+	toe_tuple.in_pkt = in_pkt;
+	memcpy(toe_tuple.smac, src_mac, sizeof(toe_tuple.smac));
+	memcpy(toe_tuple.dmac, dst_mac, sizeof(toe_tuple.dmac));
+	memcpy(toe_tuple.xlat_instance, xlat_instance, sizeof(xlat_instance));
+
+	pr_debug("%s:\n in:%d, out:%d, src_port:%d, dst_port:%d,"
+			" protocol:%d, nat_port:%d, nat_ip:0x%x, fwd:%d, snat:%d, xlat instance:%s\n\n",
+			__func__, in_pkt, out_pkt, src_port, dst_port, prot,
+			nat_port, nat_ip, fwd, nat, xlat_instance);
+
+	if (update) {
+		memcpy(&toe_tuple_tmp, &toe_tuple, sizeof(struct toe_tuple_buff));
+		toe_del_connection(&toe_tuple_tmp);
+	}
+	toe_add_connection(&toe_tuple);
+
+	return 0;
+}
+
+static int fp_cm_genl_get_tuple(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	int rc;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+
+	rc = fp_cm_genl_fill_tuple_info_for_test(msg, info->snd_portid, info->snd_seq, 0, 1);
+	if (rc < 0)
+		goto out_free;
+
+	return genlmsg_reply(msg, info);
+
+out_free:
+	nlmsg_free(msg);
+	return rc;
+}
+
+static int fp_cm_genl_del_tuple(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	int rc;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+
+	rc = fp_cm_genl_fill_tuple_info_for_test(msg, info->snd_portid, info->snd_seq, 0, 0);
+	if (rc < 0)
+		goto out_free;
+
+	return genlmsg_reply(msg, info);
+
+out_free:
+	nlmsg_free(msg);
+	return rc;
+}
+
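+/*
+ * Special case for Ethernet-WAN flows: program both TOE directions
+ * (eth -> CP uplink and CP -> usb/wifi/eth downlink) directly, without
+ * involving the userspace CM. A negative return means the flow does not
+ * match this topology and the caller falls back to the netlink path;
+ * non-negative returns are treated by the caller as "handled here".
+ */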
+static int __fp_eth_wan_set_tuple(struct nf_conntrack_tuple *tuple,
+				struct fpdb_entry *el, int add)
+{
+	struct hh_cache *hh;
+	int hh_len;
+
+	struct fp_net_device *dst, *src;
+	struct net_device *src_dev, *dst_dev;
+	struct toe_tuple_buff toe_tuple_ul; /* eth -> cp */
+	struct toe_tuple_buff toe_tuple_dl; /* cp->usb/wifi/eth */
+	struct vlan_dev_priv *vlan;
+	u8 src_vlan_en = 0, dst_vlan_en = 0;
+	u16 src_vlanid, dst_vlanid;
+
+	hh = &el->hh;
+	hh_len = hh->hh_len;
+
+	src = rcu_dereference_bh(el->in_dev);
+	dst = rcu_dereference_bh(el->out_dev);
+
+	if (is_vlan_dev(src->dev)) {
+		vlan = vlan_dev_priv(src->dev);
+		src_dev = vlan->real_dev;
+		src_vlan_en = 1;
+		src_vlanid = vlan->vlan_id;
+	} else
+		src_dev = src->dev;
+
+	if (is_vlan_dev(dst->dev)) {
+		vlan = vlan_dev_priv(dst->dev);
+		dst_dev = vlan->real_dev;
+		dst_vlan_en = 1;
+		dst_vlanid = vlan->vlan_id;
+	} else
+		dst_dev = dst->dev;
+
+	if (src->br) {
+		/* if src dev is under bridge such as usb/eth(lan)/wifi */
+
+		if (strncasecmp(dst_dev->name, "eth", 3))
+			/* dst dev is not eth */
+			return -1;
+		else {
+			if (!dst->br) {
+				/* usb/eth(lan)/wifi -> eth(wan)
+				 * don't add the ul path to toe,
+				 * and no need to send tuple to cm.
+				*/
+				return 0;
+			} else {
+				/* dst is eth lan */
+				return -1;
+			}
+		}
+	} else {
+		if (strncasecmp(src_dev->name, "eth", 3))
+			/* src dev is not eth */
+			return -1;
+	}
+
+	/* only eth wan as input go here */
+	printk(KERN_DEBUG "%s: %s -> %s\n", __func__,
+		src_dev->name, dst_dev->name);
+
+	memset(&toe_tuple_ul, 0, sizeof(toe_tuple_ul));
+	memset(&toe_tuple_dl, 0, sizeof(toe_tuple_dl));
+	toe_tuple_ul.in_pkt = ETH_PKT;
+	toe_tuple_ul.out_pkt = PDU_PKT;
+	toe_tuple_dl.in_pkt = PDU_PKT;
+
+	if (tuple->dst.protonum == IPPROTO_UDP)
+		toe_tuple_ul.prot = TOE_UDP;
+	else if (tuple->dst.protonum == IPPROTO_TCP)
+		toe_tuple_ul.prot = TOE_TCP;
+	else
+		return 1;
+	toe_tuple_dl.prot = toe_tuple_ul.prot;
+
+	if (!strncasecmp(dst_dev->name, "usbnet", 6))
+		toe_tuple_dl.out_pkt = USB_PKT;
+	else if (!strncasecmp(dst_dev->name, "wlan", 4))
+		toe_tuple_dl.out_pkt = WIFI_PKT;
+	else if (!strncasecmp(dst_dev->name, "eth", 3))
+		toe_tuple_dl.out_pkt = ETH_PKT;
+	else
+		return 2;
+
+	if (tuple->src.l3num == AF_INET) {
+		toe_tuple_ul.src_ip = ntohl(tuple->src.u3.ip);
+		toe_tuple_ul.dst_ip = ntohl(tuple->dst.u3.ip);
+
+		toe_tuple_ul.nat_ip = ntohl(tuple->dst.u3.ip);
+		toe_tuple_ul.nat_port = ntohs(el->out_tuple.dst.u.all);
+		toe_tuple_ul.nat = 0;
+
+		toe_tuple_dl.src_ip = toe_tuple_ul.src_ip;
+		toe_tuple_dl.dst_ip = toe_tuple_ul.dst_ip;
+		toe_tuple_dl.nat_ip = ntohl(el->out_tuple.src.u3.ip);
+		toe_tuple_dl.nat_port = ntohs(el->out_tuple.src.u.all);
+		toe_tuple_dl.nat = 1;
+	} else if (tuple->src.l3num == AF_INET6) {
+		memcpy(toe_tuple_ul.src_ip6,
+			tuple->src.u3.in6.s6_addr32, sizeof(toe_tuple_ul.src_ip6));
+		memcpy(toe_tuple_ul.dst_ip6,
+			tuple->dst.u3.in6.s6_addr32, sizeof(toe_tuple_ul.dst_ip6));
+		toe_tuple_dl.ip6 = toe_tuple_ul.ip6 = 1;
+	}
+
+	toe_tuple_dl.src_port = toe_tuple_ul.src_port = ntohs(tuple->src.u.all);
+	toe_tuple_dl.dst_port = toe_tuple_ul.dst_port = ntohs(tuple->dst.u.all);
+	toe_tuple_dl.crc = toe_tuple_ul.crc = 1;
+	toe_tuple_dl.fwd = toe_tuple_ul.fwd = 1;
+	toe_tuple_ul.rxtx = 0; /* ul is tx */
+	toe_tuple_dl.rxtx = 1; /* dl is rx */
+	toe_tuple_ul.pdu = 0xff;
+	toe_tuple_ul.qfi = 0xff;
+	toe_tuple_ul.rbid = 0xff;
+	toe_tuple_ul.mcid = 0xff;
+
+	if (src_vlan_en) {
+		toe_tuple_ul.vlan_en = 1;
+		toe_tuple_ul.vlanid = src_vlanid;
+	}
+
+	if (dst_vlan_en) {
+		toe_tuple_dl.vlan_en = 1;
+		toe_tuple_dl.vlanid = dst_vlanid;
+	}
+
+	if (hh_len) {
+		if (likely(hh_len <= HH_DATA_MOD)) {
+			/* this is inlined by gcc */
+			char mac_header[HH_DATA_MOD];
+			memcpy(mac_header, hh->hh_data, HH_DATA_MOD);
+			memcpy(toe_tuple_ul.smac,
+				&mac_header[HH_DATA_MOD-ETH_TYPE_LEN-ETH_ALEN], ETH_ALEN);
+			memcpy(toe_tuple_ul.dmac,
+				&mac_header[HH_DATA_MOD-ETH_TYPE_LEN-ETH_ALEN*2], ETH_ALEN);
+		} else {
+			int hh_alen = HH_DATA_ALIGN(hh_len);
+			char *mac_header = kmalloc(hh_alen, GFP_ATOMIC);
+
+			if (!mac_header)
+				return -1; /* fall back to the netlink path */
+			memcpy(mac_header, hh->hh_data, hh_alen);
+			memcpy(toe_tuple_ul.smac,
+				mac_header+(hh_alen-ETH_TYPE_LEN-ETH_ALEN), ETH_ALEN);
+			memcpy(toe_tuple_ul.dmac,
+				mac_header+(hh_alen-ETH_TYPE_LEN-ETH_ALEN*2), ETH_ALEN);
+			kfree(mac_header);
+		}
+
+		memcpy(toe_tuple_dl.smac, toe_tuple_ul.smac, ETH_ALEN);
+		memcpy(toe_tuple_dl.dmac, toe_tuple_ul.dmac, ETH_ALEN);
+	}
+
+	if (add) {
+		if (toe_add_connection(&toe_tuple_ul) >= 0 &&
+			toe_add_connection(&toe_tuple_dl) >= 0) {
+			el->nl_flag = 1;
+			return 0;
+		}
+	} else {
+		toe_del_connection(&toe_tuple_ul);
+		toe_del_connection(&toe_tuple_dl);
+		el->nl_flag = 0;
+		return 0;
+	}
+
+	return -1;
+}
+
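+/*
+ * Offload policy: a flow is only reported for offload once its measured
+ * rate over a ~1s window exceeds speed_thresh; bytes/ms * 8 == kbit/s,
+ * hence the << 3 below.
+ */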
+int fp_cm_genl_send_tuple(struct nf_conntrack_tuple *tuple, struct fpdb_entry *el,
+									 int add, int len)
+{
+	struct sk_buff *msg;
+	int rc;
+	u32 pid, ms;
+
+	if (add) {
+		if (!el->detect_speed_jiffies)
+			el->detect_speed_jiffies = jiffies;
+		el->detect_speed_bytes += len;
+		ms = jiffies_to_msecs(jiffies - el->detect_speed_jiffies);
+		if (ms >= 1000) {
+			el->speed = (el->detect_speed_bytes / ms) << 3; /* kbps */
+			el->detect_speed_jiffies = 0;
+			el->detect_speed_bytes = 0;
+		}
+
+		if (el->speed < speed_thresh)
+			return 0;
+	}
+
+	rc = __fp_eth_wan_set_tuple(tuple, el, add);
+	if (rc >= 0)
+		return rc;
+
+	pid = fp_cm_get_genl_pid();
+	if (pid == -1)
+		return -1;
+
+	msg = nlmsg_new(FP_CM_NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+
+	rc = fp_cm_genl_fill_tuple_info(msg, tuple, el, pid, add);
+	if (rc < 0)
+		goto out_free;
+
+	rc = genlmsg_unicast(&init_net, msg, pid);
+	if (rc) {
+		pr_err_ratelimited("%s genlmsg_unicast fail, rc: %d", __func__, rc);
+		el->nl_flag = 0;
+	}
+
+	return rc;
+out_free:
+	nlmsg_free(msg);
+	return rc;
+}
+EXPORT_SYMBOL(fp_cm_genl_send_tuple);
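+
+/*
+ * Speed-gating note (sketch of the math above): el->speed is derived
+ * from the bytes accumulated since detect_speed_jiffies, once at least
+ * 1000 ms have elapsed:
+ *
+ *   speed_kbps = (bytes / ms) * 8
+ *
+ * e.g. 125000 bytes over 1000 ms -> 125 bytes/ms -> 1000 kbps. A tuple
+ * is only offloaded once this estimate reaches speed_thresh.
+ */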
+
+static ssize_t speed_thresh_show(struct fastpath_module *m, char *buf)
+{
+	return sprintf(buf, "speed_thresh: %uKbps\n", speed_thresh);
+}
+
+static ssize_t speed_thresh_store(struct fastpath_module *m, const char *buf,
+			      size_t count)
+{
+	if (kstrtouint(buf, 0, &speed_thresh))
+		return -EINVAL;
+	return count;
+}
+
+static FP_ATTR(speed_thresh, S_IRUGO|S_IWUSR, speed_thresh_show, speed_thresh_store);
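+
+/*
+ * Usage sketch (the path assumes the "fastpath" kobject under
+ * /sys/kernel and this module's "fp_cm" name set in its probe):
+ *
+ *   cat /sys/kernel/fastpath/fp_cm/speed_thresh
+ *   echo 2000 > /sys/kernel/fastpath/fp_cm/speed_thresh   # kbps
+ */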
+
+static struct attribute *fp_cm_attrs[] = {
+	&fp_attr_speed_thresh.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+void fp_cm_release(struct kobject *kobj)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	int i;
+
+	for (i = ARRAY_SIZE(fp_cm_genl_ops) - 1; i >= 0; i--)
+		genl_unregister_ops(&fp_cm_genl_family, &fp_cm_genl_ops[i]);
+#endif
+	genl_unregister_family(&fp_cm_genl_family);
+
+	pr_info("fp_cm genetlink unregistered\n");
+	kfree(module);
+}
+
+static struct kobj_type ktype_fp_cm = {
+	.sysfs_ops	= &fp_sysfs_ops,
+	.default_attrs	= fp_cm_attrs,
+	.release	= fp_cm_release,
+};
+
+static int fp_cm_probe(struct fastpath_module *module)
+{
+	int ret;
+
+	module->priv = NULL;
+	snprintf(module->name, sizeof(module->name), "fp_cm");
+
+	ret = genl_register_family(&fp_cm_genl_family);
+	if (ret < 0) {
+		pr_err("genl_register_family failed (%d)\n", ret);
+		return ret;
+	}
+
+	kobject_init(&module->kobj, &ktype_fp_cm);
+	ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+	if (ret < 0) {
+		pr_err("kobject_add failed (%d)\n", ret);
+		goto err_kobject_add;
+	}
+
+	kobject_uevent(&module->kobj, KOBJ_ADD);
+
+	pr_debug("fp_cm probed\n");
+	pr_info("fp_cm gennetlink register success!!!\n");
+
+	return 0;
+
+err_kobject_add:
+	kobject_put(&module->kobj);
+	genl_unregister_family(&fp_cm_genl_family);
+
+	return ret;
+}
+
+static int fp_cm_remove(struct fastpath_module *module)
+{
+	kobject_put(&module->kobj);
+
+	pr_debug("fp_cm removed\n");
+	return 0;
+}
+
+struct fastpath_module_ops fp_cm_ops = {
+	.probe = fp_cm_probe,
+	.remove = fp_cm_remove,
+};
+
diff --git a/package/kernel/mfp/files/fp_common.h b/package/kernel/mfp/files/fp_common.h
new file mode 100644
index 0000000..5c6850c
--- /dev/null
+++ b/package/kernel/mfp/files/fp_common.h
@@ -0,0 +1,301 @@
+/**
+ *
+ *  (C) Copyright 2009-2016 Marvell International Ltd. All Rights Reserved
+ *
+ *  MARVELL CONFIDENTIAL
+ *  The source code contained or described herein and all documents related to
+ *  the source code ("Material") are owned by Marvell International Ltd or its
+ *  suppliers or licensors. Title to the Material remains with Marvell
+ *  International Ltd or its suppliers and licensors. The Material contains
+ *  trade secrets and proprietary and confidential information of Marvell or its
+ *  suppliers and licensors. The Material is protected by worldwide copyright
+ *  and trade secret laws and treaty provisions. No part of the Material may be
+ *  used, copied, reproduced, modified, published, uploaded, posted,
+ *  transmitted, distributed, or disclosed in any way without Marvell's prior
+ *  express written permission.
+ *
+ *  No license under any patent, copyright, trade secret or other intellectual
+ *  property right is granted to or conferred upon you by disclosure or delivery
+ *  of the Materials, either expressly, by implication, inducement, estoppel or
+ *  otherwise. Any license under such intellectual property rights must be
+ *  express and approved by Marvell in writing.
+ *
+ */
+
+#ifndef FP_COMMON_H
+#define FP_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/proc_fs.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/genetlink.h>
+#include <linux/if_arp.h>
+#include <net/ip_fib.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+
+#include <asm/cacheflush.h>
+#include <linux/timer.h>
+
+
+#define FP_NO_DEBUG_LVL	(0)
+#define FP_FATAL_LVL		(1)
+#define FP_ERR_LVL		(2)
+#define FP_WARNING_LVL		(3)
+#define FP_INFO_LVL		(4)
+#define FP_DEBUG_LVL		(5)
+
+/* Set fastpath debug level */
+#if !defined(FP_DEBUG_LEVEL)
+#define FP_DEBUG_LEVEL		(FP_ERR_LVL)
+#endif
+#define MAX_DEBUG_PRINT_SIZE	(1000)
+
+/* runtime debug levels */
+enum {
+	/* Warnings as Errors */
+	DBG_WARN_AS_ERR_BIT = 0,
+	DBG_WARN_AS_ERR = (1 << DBG_WARN_AS_ERR_BIT),
+	/* Entry Debug Info */
+	DBG_INFO_BIT = 1,
+	DBG_INFO = (1 << DBG_INFO_BIT),
+	/* Entry Extra Debug Info (for future use) */
+	DBG_INFO_EXT_BIT = 2,
+	DBG_INFO_EXT = (1 << DBG_INFO_EXT_BIT),
+	/* Entry Trace Logging (may impact performance) */
+	DBG_TRACE_LOG_BIT = 3,
+	DBG_TRACE_LOG = (1 << DBG_TRACE_LOG_BIT),
+
+	DBG_LEVEL_END_BIT = 4,
+	DBG_LEVEL_END = (1 << DBG_LEVEL_END_BIT),
+};
+
+void fpdb_dump_tuple(char *msg, struct nf_conntrack_tuple *t);
+
+#if FP_DEBUG_LEVEL >= FP_ERR_LVL
+#define FP_ERR_DUMP_ENTRY(msg, el)                                     \
+			fpdb_dump_entry(msg, el)
+#define FP_ERR_DUMP_CONTRACK(msg, el)                                  \
+			learner_nc_dump_conntrack_tuple(msg, el)
+#define FP_ERR_DUMP_TUPLE(msg, el)                                     \
+			fpdb_dump_tuple(msg, el)
+#else
+#define FP_ERR_DUMP_ENTRY(msg, el) do {} while (0)
+#define FP_ERR_DUMP_CONTRACK(msg, el) do {} while (0)
+#define FP_ERR_DUMP_TUPLE(msg, el) do {} while (0)
+#endif
+
+#if FP_DEBUG_LEVEL >= FP_DEBUG_LVL
+#define FP_DEBUG_DUMP_ENTRY(msg, el)                                   \
+			fpdb_dump_entry(msg, el)
+#define FP_DEBUG_DUMP_TUPLE(msg, el)                                   \
+			fpdb_dump_tuple(msg, el)
+#define FP_DEBUG_DUMP_CONTRACK(msg, el)                                \
+			learner_nc_dump_conntrack_tuple(msg, el)
+#else
+#define FP_DEBUG_DUMP_ENTRY(msg, el) do {} while (0)
+#define FP_DEBUG_DUMP_TUPLE(msg, el) do {} while (0)
+#define FP_DEBUG_DUMP_CONTRACK(msg, el) do {} while (0)
+#endif
+
+static const char *const tcp_conntrack_names[] = {
+	"NONE",
+	"SYN_SENT",
+	"SYN_RECV",
+	"ESTABLISHED",
+	"FIN_WAIT",
+	"CLOSE_WAIT",
+	"LAST_ACK",
+	"TIME_WAIT",
+	"CLOSE",
+	"SYN_SENT2",
+};
+
+#define SECS * HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS * 24 HOURS
+
+#define TCP_DEFAULT_TIMEOUT (5 DAYS)
+#define UDP_DEFAULT_TIMEOUT (5 SECS)
+#define UDP_DEFAULT_TIMEOUT_STREAM (180 SECS)
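+
+/*
+ * These macros read naturally at the use site; e.g. (5 DAYS) expands
+ * to (5 * 24 * 60 * 60 * HZ), i.e. five days expressed in jiffies.
+ */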
+
+#define IP_VER_4		4
+#define IP_VER_6		6
+#define IP_VERSION(pkt) (((struct iphdr *)(pkt))->version)
+#define NF_CT_NAT(ct) ((ct)->status & IPS_NAT_DONE_MASK)
+
+#ifdef SKB_P_SUPPORT
+#define FP_IS_SKB_P(skb) ((skb)->shared_info_ptr)
+#else
+#define FP_IS_SKB_P(skb) 0
+#endif
+static inline
+int fp_dump_tuple_ip(char *buf, const struct nf_conntrack_tuple *t, int verbose)
+{
+	int len = 0;
+	if (buf) {
+		if (verbose)
+			len += sprintf(buf, "%s ", t->dst.protonum ==
+				       IPPROTO_UDP ? "UDP" : "TCP");
+		len += sprintf(buf + len, "src=%pI4 dst=%pI4 sport=%hu dport=%hu",
+			       &t->src.u3.ip, &t->dst.u3.ip,
+			       ntohs(t->src.u.all), ntohs(t->dst.u.all));
+	}
+	return len;
+}
+
+static inline
+int fp_dump_tuple_ipv6(char *buf, const struct nf_conntrack_tuple *t, int verbose)
+{
+	int len = 0;
+	if (buf) {
+		if (verbose)
+			len += sprintf(buf, "%s ", t->dst.protonum ==
+				       IPPROTO_UDP ? "UDP" : "TCP");
+		len += sprintf(buf + len, "src=%pI6c dst=%pI6c sport=%hu dport=%hu",
+			       t->src.u3.all, t->dst.u3.all,
+			       ntohs(t->src.u.all), ntohs(t->dst.u.all));
+	}
+	return len;
+}
+
+static inline int __fp_dump_tuple(char *buf, const struct nf_conntrack_tuple *t, int verbose)
+{
+	if (t->src.l3num == AF_INET)
+		return fp_dump_tuple_ip(buf, t, verbose);
+	if (t->src.l3num == AF_INET6)
+		return fp_dump_tuple_ipv6(buf, t, verbose);
+
+	return 0;
+}
+
+static inline int fp_dump_tuple(char *buf, const struct nf_conntrack_tuple *t)
+{
+	return __fp_dump_tuple(buf, t, 1);
+}
+
+static inline void fp_print_tuple(char *msg, const struct nf_conntrack_tuple *t)
+{
+	if (t->src.l3num == AF_INET) {
+		pr_err("%s:%d(%s): %s %pI4:%hu -> %pI4:%hu\n",
+		       __func__, __LINE__, msg,
+		       t->dst.protonum ==
+		       IPPROTO_UDP ? "UDP" : "TCP",
+		       &t->src.u3.ip, ntohs(t->src.u.all),
+		       &t->dst.u3.ip, ntohs(t->dst.u.all));
+	}
+
+	if (t->src.l3num == AF_INET6) {
+		pr_err("%s:%d(%s): %s %pI6 %hu -> %pI6 %hu\n",
+		       __func__, __LINE__, msg,
+		       t->dst.protonum ==
+		       IPPROTO_UDP ? "UDP" : "TCP",
+		       t->src.u3.all, ntohs(t->src.u.all),
+		       t->dst.u3.all, ntohs(t->dst.u.all));
+	}
+}
+
+/* For debugging only */
+static inline void fp_dump_pkt_data(void *data, int len)
+{
+	void *start, *end;
+
+	start = data;
+	end = start + len;
+
+	while (start < end) {
+		pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			   (u32)((unsigned long)start - (unsigned long)data),
+			   ntohl(readl(start)), ntohl(readl(start + 0x4)),
+			   ntohl(readl(start + 0x8)), ntohl(readl(start + 0xc)));
+		start += 0x10;
+	}
+}
+
+void restore_ct(struct nf_conn *ct);
+
+/**------------------------------------------------------------**/
+/**			BACKWARD COMPATIBILITY			**/
+/**------------------------------------------------------------**/
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+	return atomic_add_unless(&kref->refcount, 1, 0);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+static inline struct netdev_queue *
+netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+	return dev_pick_tx(dev, skb);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)
+#define netif_xmit_frozen_or_stopped(txq) \
+	(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))
+#endif
+
+static inline struct dst_entry *rt6i_dst_get(struct rt6_info *rt)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+	return &rt->u.dst;
+#else
+	return &rt->dst;
+#endif
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+static inline void ip6_rt_put(struct rt6_info *rt)
+{
+	dst_release(rt6i_dst_get(rt));
+}
+#endif
+
+static inline int
+rt4_lookup(struct net *n, struct flowi *flp, struct fib_result *res)
+{
+	memset(res, 0, sizeof(*res));
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+	return fib_lookup(n, flp, res);
+#else
+	return fib_lookup(n, &flp->u.ip4, res, 0);
+#endif
+}
+
+static inline void ip4_rt_put(struct fib_result *res)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+	fib_res_put(res);
+#endif
+}
+
+#endif /* FP_COMMON_H */
diff --git a/package/kernel/mfp/files/fp_core.c b/package/kernel/mfp/files/fp_core.c
new file mode 100644
index 0000000..9bcc20a
--- /dev/null
+++ b/package/kernel/mfp/files/fp_core.c
@@ -0,0 +1,333 @@
+/**
+ *
+ *  (C) Copyright 2009-2016 Marvell International Ltd. All Rights Reserved
+ *
+ *  MARVELL CONFIDENTIAL
+ *  The source code contained or described herein and all documents related to
+ *  the source code ("Material") are owned by Marvell International Ltd or its
+ *  suppliers or licensors. Title to the Material remains with Marvell
+ *  International Ltd or its suppliers and licensors. The Material contains
+ *  trade secrets and proprietary and confidential information of Marvell or its
+ *  suppliers and licensors. The Material is protected by worldwide copyright
+ *  and trade secret laws and treaty provisions. No part of the Material may be
+ *  used, copied, reproduced, modified, published, uploaded, posted,
+ *  transmitted, distributed, or disclosed in any way without Marvell's prior
+ *  express written permission.
+ *
+ *  No license under any patent, copyright, trade secret or other intellectual
+ *  property right is granted to or conferred upon you by disclosure or delivery
+ *  of the Materials, either expressly, by implication, inducement, estoppel or
+ *  otherwise. Any license under such intellectual property rights must be
+ *  express and approved by Marvell in writing.
+ *
+ */
+
+
+#define pr_fmt(fmt) KBUILD_MODNAME " core :%s:%d: " fmt, __func__, __LINE__
+
+#include "fp_common.h"
+#include "fp_core.h"
+
+static const char *fastpath_driver_version = "Fastpath V2.1";
+static struct fastpath_driver *fastpath;
+
+#ifdef FASTPATH_WARN_AS_ERR
+unsigned int debug_level = (DBG_WARN_AS_ERR | DBG_INFO);
+#else
+unsigned int debug_level = (DBG_INFO);
+#endif
+
+/* fastpath modules - defines modules add/remove order */
+static struct fastpath_module_ops *fp_modules_ops[] = {
+	&fp_device_ops,
+	&fp_database_ops,
+	&fp_learner_ops,
+	&fp_classifier_ops,
+	&fp_forward_ops,
+	&fp_netlink_ops,
+#ifdef CONFIG_ASR_TOE
+	&fp_cm_ops,
+#endif
+
+};
+
+struct fastpath_module *fp_module_get_by_name(const char *name)
+{
+	int i;
+
+	for (i = 0; i < fastpath->num_modules; i++) {
+		if (!fastpath->modules[i])
+			continue;
+		if (!strncmp(fastpath->modules[i]->name, name, MAX_NAME_SZ)) {
+			kobject_get(&fastpath->modules[i]->kobj);
+			return fastpath->modules[i];
+		}
+	}
+
+	return NULL;
+}
+
+struct fastpath_module *fp_module_get_by_idx(int idx)
+{
+	if (fastpath->modules[idx]) {
+		kobject_get(&fastpath->modules[idx]->kobj);
+		return fastpath->modules[idx];
+	}
+	return NULL;
+}
+
+void fp_module_put(struct fastpath_module *m)
+{
+	kobject_put(&m->kobj);
+}
+
+/* debug flags: WDET*/
+static ssize_t fastpath_debug_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	char flags[] = "WDET";
+	int i;
+
+	for (i = 0; i < DBG_LEVEL_END_BIT; i++)
+		if (!(debug_level & (1 << i)))
+			flags[i] = '-';
+
+	return scnprintf(buf, PAGE_SIZE, "fastpath debug flags: %s\n"
+				"\nAvailable Flags:\n"
+				"W - warnings as errors\n"
+				"D - fpdb entry debug info\n"
+				"E - Extended fpdb entry debug info (not used)\n"
+				"T - fpdb trace logging\n", flags);
+}
+
+static int set_or_clear_flag(char flag, int set)
+{
+	switch (flag) {
+	case 'w':
+	case 'W':
+		debug_level = (set) ? (debug_level | DBG_WARN_AS_ERR) :
+				      (debug_level & ~DBG_WARN_AS_ERR);
+		break;
+	case 'd':
+	case 'D':
+		debug_level = (set) ? (debug_level | DBG_INFO) :
+				      (debug_level & ~DBG_INFO);
+		break;
+	case 'e':
+	case 'E':
+		debug_level = (set) ? (debug_level | DBG_INFO_EXT) :
+				      (debug_level & ~DBG_INFO_EXT);
+		break;
+	case 't':
+	case 'T':
+		debug_level = (set) ? (debug_level | DBG_TRACE_LOG) :
+				      (debug_level & ~DBG_TRACE_LOG);
+		break;
+	default: return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t fastpath_debug_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t size)
+{
+	int count = size;
+	char c, flag;
+	const char *ptr = buf;
+	int ret, op;
+
+	if (ptr[0] == '0') {
+		debug_level = 0;
+		return size;
+	}
+
+	while (count > 1) {
+		c = ptr[0];
+		flag = ptr[1];
+
+		if (c == ' ') {
+			ptr++;
+			count--;
+			continue;
+		}
+		switch (c) {
+		case '+':
+			op = 1;
+			break;
+		case '-':
+			op = 0;
+			break;
+		default: return -EINVAL;
+		}
+
+		ret = set_or_clear_flag(flag, op);
+		if (ret < 0)
+			return ret;
+		ptr += 2;
+		count -= 2;
+	}
+	return size;
+}
+
+static struct kobj_attribute fastpath_debug_attribute =
+__ATTR(debug, S_IRUGO|S_IWUSR, fastpath_debug_show, fastpath_debug_store);
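+
+/*
+ * Usage sketch (the "fastpath" kobject is created under /sys/kernel
+ * below): flags are toggled in "+X"/"-X" pairs, '0' clears all.
+ *
+ *   echo "+W +T" > /sys/kernel/fastpath/debug   # set flags
+ *   echo "-T" > /sys/kernel/fastpath/debug      # clear one flag
+ *   echo 0 > /sys/kernel/fastpath/debug         # clear everything
+ *   cat /sys/kernel/fastpath/debug              # show current flags
+ */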
+
+static const struct attribute *fp_core_attrs[] = {
+	&fastpath_debug_attribute.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+#define to_attr(a) container_of(a, struct fp_attr, attr)
+
+static ssize_t fastpath_module_show(struct kobject *kobj,
+				     struct attribute *attr, char *buf)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+	struct fp_attr *mattr = to_attr(attr);
+
+	if (!module || !mattr || !mattr->show)
+		return -EINVAL;
+
+	return mattr->show(module, buf);
+}
+
+static ssize_t fastpath_module_store(struct kobject *kobj,
+				      struct attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+	struct fp_attr *mattr = to_attr(attr);
+
+	if (!module || !mattr || !mattr->store)
+		return -EINVAL;
+
+	return mattr->store(module, buf, count);
+}
+
+const struct sysfs_ops fp_sysfs_ops = {
+	.show = fastpath_module_show,
+	.store = fastpath_module_store,
+};
+
+static int __init fastpath_init(void)
+{
+	int ret, i;
+	struct fastpath_module *m;
+	struct net *net;
+
+	pr_debug("init start\n");
+
+	fastpath = kzalloc(sizeof(*fastpath), GFP_KERNEL);
+	if (!fastpath) {
+		pr_err("No Memory\n");
+		return -ENOMEM;
+	}
+
+	fastpath->version = fastpath_driver_version;
+	fastpath->num_modules = ARRAY_SIZE(fp_modules_ops);
+	fastpath->modules = kcalloc(fastpath->num_modules,
+				sizeof(struct fastpath_module *), GFP_KERNEL);
+	if (!fastpath->modules) {
+		pr_err("No Memory\n");
+		ret = -ENOMEM;
+		goto free_fastpath;
+	}
+
+	fastpath->kobj = kobject_create_and_add("fastpath", kernel_kobj);
+	if (!fastpath->kobj) {
+		pr_err("kobject_create_and_add failed\n");
+		ret = -ENOMEM;
+		goto free_modules;
+	}
+
+	ret = sysfs_create_files(fastpath->kobj, fp_core_attrs);
+	if (ret < 0) {
+		pr_err("sysfs_create_files failed (%d)\n", ret);
+		goto kobj_put;
+	}
+
+	for_each_net(net)
+		nf_ct_netns_get(net, NFPROTO_BRIDGE);
+
+	/* load fastpath modules */
+	for (i = 0; i < fastpath->num_modules; i++) {
+		struct fastpath_module_ops *ops = fp_modules_ops[i];
+
+		m = kzalloc(sizeof(*m), GFP_KERNEL);
+		if (!m) {
+			pr_err("no memeory\n");
+			ret = -ENOMEM;
+			goto unload_modules;
+		}
+
+		m->ops = ops;
+		m->fastpath = fastpath;
+		m->idx = i;
+
+		ret = m->ops->probe(m);
+		if (ret < 0) {
+			pr_err("Error loading [%d]:%s (%d)\n", i, m->name, ret);
+			kfree(m);
+			goto unload_modules;
+		}
+
+		/* module loaded */
+		fastpath->modules[i] = m;
+	}
+
+	pr_debug("init done\n");
+	return 0;
+
+unload_modules:
+	for (i = i - 1; i >= 0; i--) {
+		m = fastpath->modules[i];
+		m->ops->remove(m);
+	}
+	sysfs_remove_files(fastpath->kobj, fp_core_attrs);
+kobj_put:
+	kobject_put(fastpath->kobj);
+free_modules:
+	kfree(fastpath->modules);
+free_fastpath:
+	kfree(fastpath);
+	return ret;
+}
+
+
+static void __exit fastpath_exit(void)
+{
+	struct fastpath_module *m;
+	int i;
+	struct net *net;
+
+	pr_debug("exit start\n");
+
+	/* unload fastpath modules */
+	for (i = 0; i < fastpath->num_modules; i++) {
+		m = fastpath->modules[fastpath->num_modules - i - 1];
+		m->ops->remove(m);
+	}
+	sysfs_remove_files(fastpath->kobj, fp_core_attrs);
+	kobject_put(fastpath->kobj);
+	kfree(fastpath->modules);
+	kfree(fastpath);
+
+	for_each_net(net)
+		nf_ct_netns_put(net, NFPROTO_BRIDGE);
+
+	pr_debug("exit done\n");
+}
+
+module_param(debug_level, uint, 0644);
+
+module_init(fastpath_init)
+module_exit(fastpath_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomer Eliyahu tomere@marvell.com");
+MODULE_AUTHOR("Ram Marzin ramm@marvell.com");
+MODULE_AUTHOR("Yair Weiss yairw@marvell.com");
+MODULE_DESCRIPTION("Marvell Fastpath");
+MODULE_INFO(Version, __MODULE_STRING(FP_DRV_VERSION));
diff --git a/package/kernel/mfp/files/fp_core.h b/package/kernel/mfp/files/fp_core.h
new file mode 100644
index 0000000..ccaf7a8
--- /dev/null
+++ b/package/kernel/mfp/files/fp_core.h
@@ -0,0 +1,79 @@
+/**
+ *
+ *  (C) Copyright 2009-2016 Marvell International Ltd. All Rights Reserved
+ *
+ *  MARVELL CONFIDENTIAL
+ *  The source code contained or described herein and all documents related to
+ *  the source code ("Material") are owned by Marvell International Ltd or its
+ *  suppliers or licensors. Title to the Material remains with Marvell
+ *  International Ltd or its suppliers and licensors. The Material contains
+ *  trade secrets and proprietary and confidential information of Marvell or its
+ *  suppliers and licensors. The Material is protected by worldwide copyright
+ *  and trade secret laws and treaty provisions. No part of the Material may be
+ *  used, copied, reproduced, modified, published, uploaded, posted,
+ *  transmitted, distributed, or disclosed in any way without Marvell's prior
+ *  express written permission.
+ *
+ *  No license under any patent, copyright, trade secret or other intellectual
+ *  property right is granted to or conferred upon you by disclosure or delivery
+ *  of the Materials, either expressly, by implication, inducement, estoppel or
+ *  otherwise. Any license under such intellectual property rights must be
+ *  express and approved by Marvell in writing.
+ *
+ */
+
+#ifndef __FP_CORE_H__
+#define __FP_CORE_H__
+
+#define MAX_NAME_SZ 20
+
+struct fastpath_module;
+
+struct fastpath_driver {
+	const char *version;
+	struct kobject *kobj;
+	struct fastpath_module **modules;
+	int num_modules;
+};
+
+struct fastpath_module_ops {
+	int (*probe)(struct fastpath_module *);
+	int (*remove)(struct fastpath_module *);
+	int (*ioctl)(struct fastpath_module *, unsigned int cmd, void *data);
+};
+
+struct fastpath_module {
+	struct kobject			kobj;
+	char				name[MAX_NAME_SZ];
+	int				idx;
+	void				*priv;
+	struct fastpath_module_ops	*ops;
+	const struct fastpath_driver	*fastpath;
+};
+
+struct fp_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct fastpath_module *, char *);
+	ssize_t (*store)(struct fastpath_module *, const char *, size_t count);
+};
+
+#define FP_ATTR(_name, _mode, _show, _store) \
+	struct fp_attr fp_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+#define to_fpmod(k) container_of(k, struct fastpath_module, kobj)
+
+extern unsigned int debug_level; /* debug level bitmask - see fp_common.h */
+extern const struct sysfs_ops fp_sysfs_ops;
+extern struct fastpath_module_ops fp_device_ops;
+extern struct fastpath_module_ops fp_learner_ops;
+extern struct fastpath_module_ops fp_classifier_ops;
+extern struct fastpath_module_ops fp_forward_ops;
+extern struct fastpath_module_ops fp_database_ops;
+extern struct fastpath_module_ops fp_netlink_ops;
+extern struct fastpath_module_ops fp_cm_ops;
+
+struct fastpath_module *fp_module_get_by_name(const char *name);
+struct fastpath_module *fp_module_get_by_idx(int idx);
+void fp_module_put(struct fastpath_module *m);
+
+#endif
diff --git a/package/kernel/mfp/files/fp_database.h b/package/kernel/mfp/files/fp_database.h
new file mode 100644
index 0000000..1766c79
--- /dev/null
+++ b/package/kernel/mfp/files/fp_database.h
@@ -0,0 +1,147 @@
+#ifndef FP_DATABASE_H
+#define FP_DATABASE_H
+
+/*
+ *	Fast path data base
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+/**--------------------------------------
+ * Enum
+ *--------------------------------------*/
+
+
+static const char *const entry_state_names[] = {
+	"initialized",
+	"alive",
+	"dying",
+};
+
+enum entry_state {
+	ENTRY_INITIALIZED,
+	ENTRY_ALIVE,
+	ENTRY_DYING,
+};
+
+/**--------------------------------------
+ * STRUCTURES
+ *--------------------------------------*/
+
+/* Trace buffer per entry (enabled via the DBG_TRACE_LOG debug flag) */
+struct fpdb_trace {
+	struct list_head list;
+	int sz;
+	unsigned int hit_counter;
+	unsigned int timeout;
+	struct tcphdr tcph;
+	unsigned int tcp_state;
+	unsigned long ct_status; /* have we seen traffic in both directions? */
+};
+
+struct fpdb_debug {
+	unsigned int in_route_type;
+	unsigned int out_route_type;
+	struct fpdb_trace trace;
+};
+
+struct fpdb_entry {
+	/* Management fields - DO NOT USE! */
+	struct hlist_node hlist;
+	struct rcu_head rcu;
+	atomic_t rc;
+	spinlock_t lock;
+	struct timer_list guard_timer;	/* embedded so from_timer() works */
+	struct work_struct work;
+
+	/* Entry data */
+	unsigned long tstamp; /* used by database aging-out */
+	struct nf_conn *ct;
+	enum ip_conntrack_dir dir;
+	unsigned long flags;
+	struct hh_cache hh;
+	struct nf_conntrack_tuple in_tuple;
+	struct nf_conntrack_tuple out_tuple;
+	struct fp_net_device *out_dev;	/* destination interface handle */
+	struct fp_net_device *in_dev;	/* source interface handle */
+	enum entry_state state;
+	bool block;
+
+	/* statistics & debug */
+	unsigned int bucket;
+	unsigned int hit_counter;
+	struct fpdb_debug debug;
+#ifdef CONFIG_ASR_TOE
+	unsigned int nl_flag;
+	unsigned long detect_speed_jiffies;
+	unsigned int detect_speed_bytes;
+	unsigned int speed; /* Kbps */
+#endif
+};
+
+struct nf_conn_fastpath {
+	struct fpdb_entry *fpd_el[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_fastpath *nfct_fastpath(const struct nf_conn *ct)
+{
+	return nf_ct_ext_find(ct, NF_CT_EXT_FASTPATH);
+}
+
+struct hist_entry {
+	unsigned int buckets;
+	unsigned int entries;
+};
+
+#define HISTOGRAM_SIZE			(16)
+
+struct fpdb_stats {
+	/* HASH statistics */
+	u32 size;		/* number of buckets */
+	u32 lookups;		/* total lookups */
+	u32 iterations;		/* iterations per lookups */
+	u32 hits;		/* successful lookups */
+	u32 largest_bucket;	/* Number of entries in largest bucket */
+	u32 num_occupied;	/* Number of occupied buckets */
+	u32 load_factor;	/* num of hashed entries / num of buckets */
+
+	/* HISTOGRAM */
+	struct hist_entry hist[HISTOGRAM_SIZE + 1];
+
+	/* Database statistics */
+	u32 avg_lookup_latency;
+	u32 max_lookup_latency;
+	u32 max_entries;	/* max num of hashed entries */
+};
+
+/**--------------------------------------
+ * API FUNCTIONS
+ *--------------------------------------*/
+
+void fpdb_lock_bh(void);
+void fpdb_unlock_bh(void);
+struct fpdb_entry *fpdb_alloc(gfp_t flags);
+void fpdb_dump_entry(char *msg, struct fpdb_entry *el);
+void fpdb_add(struct fpdb_entry *el);
+void fpdb_replace(struct fpdb_entry *el, struct fpdb_entry *nel);
+void fpdb_del(struct fpdb_entry *entry);
+struct fpdb_entry *fpdb_get(struct nf_conntrack_tuple *tuple);
+void fpdb_put(struct fpdb_entry *entry);
+void fpdb_del_by_dev(struct net_device *dev);
+void fpdb_del_by_port(unsigned int);
+void fpdb_flush(void);
+void fpdb_del_least_used_entry(int max_num);
+void fpdb_trace(struct fpdb_entry *entry, struct tcphdr *tcph);
+void fpdb_iterate(int (*iter)(struct fpdb_entry *e, void *data), void *data);
+void fpdb_free(struct fpdb_entry * el);
+int fpdb_del_block_entry_by_dev(struct fpdb_entry *el, void *data);
+
+#ifdef FP_USE_SRAM_POOL_OPT
+extern unsigned long sram_pool_alloc(size_t size);
+extern void sram_pool_free(unsigned long addr, size_t size);
+#endif
+
+#endif /* FP_DATABASE_H */
diff --git a/package/kernel/mfp/files/fp_database_hash.c b/package/kernel/mfp/files/fp_database_hash.c
new file mode 100644
index 0000000..be35203
--- /dev/null
+++ b/package/kernel/mfp/files/fp_database_hash.c
@@ -0,0 +1,1454 @@
+/*
+ *	Fast path database hash implementation
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Notes:
+ *	Implementation according to Documentation/RCU/rcuref.txt
+ */
+
+#define pr_fmt(fmt) "mfp" " database:%s:%d: " fmt, __func__, __LINE__
+
+#include "fp_common.h"
+#include "fp_database.h"
+#include "fp_device.h"
+#include "fp_core.h"
+#ifdef CONFIG_ASR_TOE
+#include "../linux/drivers/marvell/toev2/toe.h"
+#endif
+
+#define FP_ZONE			(NF_CT_DEFAULT_ZONE_ID)
+#define GUARD_TIMEOUT_SEC		(10)
+
+static u32 hash_rnd __read_mostly;
+
+static inline const char *state_to_string(enum entry_state state)
+{
+	return entry_state_names[state];
+}
+
+static inline int __fpdb_dump_entry(char *buf, struct fpdb_entry *el)
+{
+	int len = sprintf(buf, "fpdb dump entry (0x%p):\n", el);
+
+	len += fp_dump_tuple(buf + len, &el->in_tuple);
+	len += sprintf(buf + len, "\n");
+	len += fp_dump_tuple(buf + len, &el->out_tuple);
+	if (el->hh.hh_len) {
+		struct ethhdr *eth = (struct ethhdr *)(((u8 *) el->hh.hh_data) +
+					(HH_DATA_OFF(sizeof(*eth))));
+		len += sprintf(buf + len, "\nMAC header: src=%pM dst=%pM type=%04x\n",
+			       eth->h_source, eth->h_dest, eth->h_proto);
+	} else {
+		len += sprintf(buf + len, "\nMAC header was not set\n");
+	}
+	len += sprintf(buf + len, "Interfaces: in %p: %s, out %p: %s\n",
+			el->in_dev, el->in_dev->dev->name, el->out_dev, el->out_dev->dev->name);
+	len += sprintf(buf + len, "State: %s hits=%d pointer=%p\n",
+		       state_to_string(el->state), el->hit_counter, el);
+	len += sprintf(buf + len, "ct info: ct=%p timeout: %x rc=%d\n",
+		       el->ct, el->ct->timeout, atomic_read(&el->rc));
+
+	if (debug_level & DBG_INFO)
+		len += sprintf(buf + len, "DEBUG: (NAT=%s) (route: in=%d out=%d)\n",
+			   NF_CT_NAT(el->ct) ? "YES" : "NO",
+			   el->debug.in_route_type, el->debug.out_route_type);
+
+	return len;
+}
+
+void fpdb_dump_entry(char *msg, struct fpdb_entry *el)
+{
+	char buf[MAX_DEBUG_PRINT_SIZE];
+	int len = 0;
+
+	BUG_ON(!el);
+
+	if (msg)
+		len = sprintf(buf, "%s", msg);
+
+	len += __fpdb_dump_entry(buf + len, el);
+
+	pr_err("%s", buf);
+}
+
+void fpdb_dump_tuple(char *msg, struct nf_conntrack_tuple *t)
+{
+	char buf[MAX_DEBUG_PRINT_SIZE];
+	int len = 0;
+
+	BUG_ON(!t);
+
+	if (msg)
+		len = sprintf(buf, "%s", msg);
+
+	len += sprintf(buf + len, "fpdb dump tuple:\n");
+	len += fp_dump_tuple(buf + len, t);
+
+	pr_err("%s\n", buf);
+}
+
+static int fpdb_print_entry(struct fpdb_entry *el, void *data)
+{
+	char in[256], out[256];
+	unsigned int state, use;
+	int *first_entry = data;
+
+	if (atomic_inc_not_zero(&el->rc)) {
+		if (*first_entry) {
+			pr_err("l2  l3  l4  timeout\thash\thits\tstate  in_dev  out_dev  tuple_in  tuple_out ct block use refcnt\n");
+			*first_entry = false;
+		}
+		__fp_dump_tuple(in, &el->in_tuple, 0);
+		__fp_dump_tuple(out, &el->out_tuple, 0);
+		state = el->ct->proto.tcp.state;
+		use = atomic_read(&el->ct->ct_general.use);
+		pr_err("%s  %s  %s  %d\t%d\t%d\t%s  %s  %s  %s  %s  %p  %d  %d  %d\n",
+				  el->hh.hh_len ? "eth" : "NA",
+				  el->in_tuple.src.l3num == AF_INET6 ?
+				  "ipv6" : "ipv4",
+				  el->in_tuple.dst.protonum == IPPROTO_UDP ?
+				  "udp" : "tcp",
+				  jiffies_to_msecs(el->ct->timeout - jiffies) / 1000U,
+				  el->bucket, el->hit_counter,
+				  el->in_tuple.dst.protonum == IPPROTO_UDP ?
+				  "N/A" : tcp_conntrack_names[state],
+				  el->in_dev->dev->name,
+				  el->out_dev->dev->name,
+				  in, out, el->ct, el->block, use, atomic_read(&el->rc));
+		fpdb_put(el);
+	}
+	return 0;
+}
+
+void fpdb_dump_db(void)
+{
+	int first_entry = true;
+	fpdb_iterate(fpdb_print_entry, &first_entry);
+}
+
+/****************************************************************************
+ *			Fast Path Database prototypes
+ ****************************************************************************/
+
+struct fpdb_htable {
+	struct hlist_head *h;
+	unsigned int size;
+	int vmalloced;
+};
+
+struct fp_database {
+	struct fpdb_stats stats;
+	volatile u32 num_entries;
+	struct fpdb_htable htable;
+	spinlock_t lock;
+	struct nf_ct_ext_type *nfct_ext;
+	struct kmem_cache *db_cache;
+};
+
+struct timeout_entry {
+	struct list_head list;
+	struct timer_list *timeout;
+};
+/****************************************************************************
+ *			Fast Path Database globals
+ ****************************************************************************/
+
+static struct fp_database *db;
+/* TODO - do we need something else here??
+   Or is there only one "net" in ESHEL? */
+struct net *net = &init_net;
+
+#ifdef CONFIG_ASR_TOE
+extern int fp_cm_genl_send_tuple(struct nf_conntrack_tuple *tuple, struct fpdb_entry *el,
+									 int add, int len);
+static inline bool get_remote_mac_addr(struct fpdb_entry *el, char *mac)
+{
+	struct neighbour *neigh;
+	struct neigh_table *tbl;
+	struct nf_conntrack_tuple *tuple;
+	struct net_device *br;
+
+	if (el->in_dev->br != el->out_dev->br)
+		return false;
+
+	tuple = &el->in_tuple;
+	br = el->out_dev->br;
+	tbl = (tuple->src.l3num == AF_INET6) ? &nd_tbl : &arp_tbl;
+
+	neigh = neigh_lookup(tbl, tuple->dst.u3.all, br);
+	if (neigh) {
+		memcpy(mac, neigh->ha, ETH_ALEN);
+		neigh_release(neigh);
+		return true;
+	}
+
+	return false;
+}
+
+#endif
+
+static void guard_timer_timeout(struct timer_list *t)
+{
+	/* the timer is embedded in the entry, so from_timer() can
+	 * recover the enclosing fpdb_entry */
+	struct fpdb_entry *el = from_timer(el, t, guard_timer);
+
+	pr_err("Entry was held and could not be removed for %d sec. [%px][rc=%d] state=%d\n",
+	       GUARD_TIMEOUT_SEC, el, atomic_read(&el->rc), el->state);
+
+	/* BUG_ON(debug_level & DBG_WARN_AS_ERR);*/
+	if (atomic_read(&el->rc) > 0) {
+		FP_ERR_DUMP_ENTRY(NULL, el);
+		pr_err("Extending the timer while rc is non-zero\n");
+		mod_timer(&el->guard_timer, jiffies + GUARD_TIMEOUT_SEC * HZ);
+	}
+}
+
+#ifdef FP_USE_SRAM_POOL_OPT
+static void *local_nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
+{
+	struct hlist_nulls_head *hash;
+	unsigned int nr_slots, i;
+	size_t sz;
+
+	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+	sz = nr_slots * sizeof(struct hlist_nulls_head);
+	hash = (void *)sram_pool_alloc(sz);
+	if (hash && nulls)
+		for (i = 0; i < nr_slots; i++)
+			INIT_HLIST_NULLS_HEAD(&hash[i], i);
+	return hash;
+}
+
+static void local_nf_ct_free_hashtable(void *hash, unsigned int size)
+{
+	sram_pool_free((unsigned long)hash, size * sizeof(struct hlist_nulls_head));
+}
+#endif
+
+static inline int fpdb_alloc_hashtable(struct fpdb_htable *htable)
+{
+	/* Currently use the same size used by others.. */
+	htable->size = nf_conntrack_htable_size;
+#ifdef FP_USE_SRAM_POOL_OPT
+	/* the local SRAM allocator always uses the two-argument form */
+	htable->h = local_nf_ct_alloc_hashtable(&htable->size, 0);
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	htable->h = nf_ct_alloc_hashtable(&htable->size, &htable->vmalloced, 0);
+#else
+	htable->h = nf_ct_alloc_hashtable(&htable->size, 0);
+#endif
+#endif
+	if (!htable->h)
+		return -ENOMEM;
+
+	pr_debug("allocated fpdb hashtable (size = %d)\n", htable->size);
+
+	return 0;
+}
+
+static inline void fpdb_free_hashtable(struct fpdb_htable *htable)
+{
+#ifdef FP_USE_SRAM_POOL_OPT
+	/* matches the two-argument local SRAM allocator above */
+	local_nf_ct_free_hashtable(htable->h, htable->size);
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	nf_ct_free_hashtable(htable->h, htable->vmalloced, htable->size);
+#else
+	kvfree(htable->h);
+	htable->h = NULL;
+#endif
+#endif
+}
+
+/****************************************************************************
+ *			Fast Path Database API
+ ****************************************************************************/
+
+/**
+ * Allocates and initializes a new database entry
+ *
+ * @param flags  kmalloc flags
+ *
+ * @return new allocated and initialized database entry
+ */
+struct fpdb_entry *fpdb_alloc(gfp_t flags)
+{
+	struct fpdb_entry *el;
+
+#ifdef FP_USE_SRAM_POOL_OPT
+	el = (struct fpdb_entry *)sram_pool_alloc(sizeof(struct fpdb_entry));
+#else
+	el = kmem_cache_zalloc(db->db_cache, flags);
+#endif
+	if (!el) {
+		pr_err("no memory\n");
+		return NULL;
+	}
+
+	spin_lock_init(&el->lock);
+	INIT_HLIST_NODE(&el->hlist);
+	INIT_LIST_HEAD(&el->debug.trace.list);
+	timer_setup(&el->guard_timer, guard_timer_timeout, 0);
+
+	el->state = ENTRY_INITIALIZED;
+#ifdef CONFIG_ASR_TOE
+	el->nl_flag = 0;
+#endif
+	return el;
+}
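+
+/*
+ * Lifecycle sketch (hypothetical caller, error handling elided): an
+ * entry is allocated, filled in, then published; fpdb_del() later
+ * marks it dying and it is freed after an RCU grace period:
+ *
+ *	struct fpdb_entry *el = fpdb_alloc(GFP_ATOMIC);
+ *
+ *	el->in_tuple = ...; el->out_tuple = ...;
+ *	el->in_dev = ...;   el->out_dev = ...;
+ *	fpdb_add(el);	// entry is now live, rc == 1
+ *	...
+ *	fpdb_del(el);
+ */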
+
+
+/**
+ * Free a database entry
+ *
+ * @param el  pointer to a previously allocated fpdb_entry
+ *
+ * @return void
+ */
+void fpdb_free(struct fpdb_entry *el)
+{
+	fpdev_put(el->out_dev);
+	fpdev_put(el->in_dev);
+
+#ifdef FP_USE_SRAM_POOL_OPT
+	sram_pool_free((unsigned long)el, sizeof(struct fpdb_entry));
+#else
+	kmem_cache_free(db->db_cache, el);
+#endif
+	return;
+}
+
+
+/**
+ * jenkins hash function using the source tuple
+ *
+ * @return hash key
+ */
+static inline unsigned int
+fpdb_hash_by_src(const struct nf_conntrack_tuple *tuple)
+{
+	unsigned int hash_src, hash_dst, hash;
+
+	BUG_ON(!tuple);
+
+	hash_src = jhash_3words((__force u32) tuple->src.u3.ip,
+			    (__force u32) tuple->src.u.all ^ FP_ZONE,
+			    tuple->src.l3num, hash_rnd);
+	hash_dst = jhash_3words((__force u32) tuple->dst.u3.ip,
+			    (__force u32) tuple->dst.u.all ^ FP_ZONE,
+			    tuple->dst.protonum, hash_rnd);
+	hash = jhash_2words(hash_src, hash_dst, hash_rnd);
+
+	return ((u64)hash * db->htable.size) >> 32;
+}
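+
+/*
+ * The final line maps the 32-bit hash onto [0, htable.size) without a
+ * modulo: e.g. hash = 0x80000000 with size = 1024 gives
+ * ((u64)0x80000000 * 1024) >> 32 = 512.
+ */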
+
+/**
+ * rcu callback
+ *
+ * @param head
+ */
+static void fpdb_rcu_free(struct rcu_head *head)
+{
+	struct fpdb_entry *el = container_of(head, struct fpdb_entry, rcu);
+
+	BUG_ON(atomic_read(&el->rc) || el->state != ENTRY_DYING);
+
+	FP_DEBUG_DUMP_ENTRY("fpdb_rcu_free: entry was deleted\n", el);
+
+	/* the guard timer is embedded in the entry; stop it before the
+	 * memory is returned */
+	del_timer_sync(&el->guard_timer);
+
+	spin_lock_bh(&db->lock);
+	db->num_entries--;
+	spin_unlock_bh(&db->lock);
+
+	fpdev_put(el->out_dev);
+	fpdev_put(el->in_dev);
+
+#ifdef FP_USE_SRAM_POOL_OPT
+	sram_pool_free((unsigned long)el, sizeof(struct fpdb_entry));
+#else
+	kmem_cache_free(db->db_cache, el);
+#endif
+}
+
+
+
+/**
+ * decrement an entry's reference count and delete if 0
+ *
+ * @param el  pointer to a previously allocated fpdb_entry
+ */
+void fpdb_put(struct fpdb_entry *el)
+{
+	if (atomic_dec_and_test(&el->rc))
+		call_rcu(&el->rcu, fpdb_rcu_free);
+}
+
+
+#define FP_SMALL_MEM_LIMIT	(64 * 1024 * 1024)
+/**
+ * Adds a previously allocated entry to the database
+ * and updates its reference count to 1.
+ *
+ * @attention el must be allocated first with fpdb_alloc()
+ *      Initial Implementation - Hash by input tuple only
+ * @param el  pointer to a previously allocated fpdb_entry
+ *
+ */
+void fpdb_add(struct fpdb_entry *el)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+	int pages = totalram_pages();
+#else
+	int pages = totalram_pages;
+#endif
+	unsigned int hash;
+	u32 max_num;
+
+#ifdef CONFIG_ASR_TOE
+	char mac[ETH_ALEN];
+#endif
+
+	spin_lock_bh(&el->lock);
+	spin_lock_bh(&db->lock);
+	BUG_ON(!el || !el->out_dev);
+	BUG_ON(el->state != ENTRY_INITIALIZED);
+
+	hash = fpdb_hash_by_src(&el->in_tuple);
+
+	atomic_set(&el->rc, 1);
+	el->state = ENTRY_ALIVE;
+	el->bucket = hash;
+	el->tstamp = jiffies;
+	if (!el->tstamp)
+		el->tstamp = 1;
+
+	BUG_ON(in_irq());
+	WARN_ON_ONCE(irqs_disabled());
+	hlist_add_head_rcu(&el->hlist, &db->htable.h[hash]);
+	db->num_entries++;
+
+#ifdef CONFIG_ASR_TOE
+	if (get_remote_mac_addr(el, mac))
+		mfp_toe_add_dmac(el->out_dev->dev, mac);
+#endif
+
+	spin_unlock_bh(&db->lock);
+	spin_unlock_bh(&el->lock);
+
+	/* Normally Conntrack MAX is HashSize*8. So here is not suit to only check double*/
+	/*we will modify the code to check 6 times of hash size --Yhuang 20160617*/
+
+	if (pages <= (FP_SMALL_MEM_LIMIT >> PAGE_SHIFT))
+		max_num = 2 * db->htable.size;
+	else
+		max_num = 6 * db->htable.size;
+
+	if (unlikely(db->num_entries > max_num)) {
+		pr_err_ratelimited("%s: database overloaded (%d entries, max=%d)\n",
+			   __func__, db->num_entries, max_num);
+		/*
+		if (debug_level & DBG_WARN_AS_ERR) {
+			fpdb_dump_db();
+			BUG();
+		}
+		*/
+		fpdb_flush();
+	} else if (unlikely(db->num_entries > ((max_num * 3) / 4))) {
+		fpdb_del_least_used_entry(max_num);
+	}
+
+	if (db->stats.max_entries < db->num_entries)
+		db->stats.max_entries = db->num_entries;
+	FP_DEBUG_DUMP_ENTRY("fpdb_add: entry was added\n", el);
+}
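+
+/*
+ * Sizing note (illustrative numbers): with htable.size = 16384 buckets
+ * on a system above the 64 MB FP_SMALL_MEM_LIMIT, max_num is
+ * 6 * 16384 = 98304 entries; least-used eviction starts above 3/4 of
+ * that (73728) and a full flush is forced only past max_num itself.
+ */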
+
+/**
+ * Query the database for an entry matching the input tuple
+ * and increment the reference count for that entry if found.
+ *
+ * @attention The user MUST call fpdb_put() as soon as the entry
+ *            is not used!
+ *
+ * @param tuple  pointer to a nf_conntrack_tuple
+ *
+ * @return pointer to the matching entry, NULL if not found
+ */
+struct fpdb_entry *fpdb_get(struct nf_conntrack_tuple *tuple)
+{
+	unsigned int hash, iterations = 0;
+	struct fpdb_entry *el;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	struct hlist_node *h;
+#endif
+
+	BUG_ON(!tuple);
+
+	db->stats.lookups++;
+
+	hash = fpdb_hash_by_src(tuple);
+
+	rcu_read_lock_bh();
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	hlist_for_each_entry_rcu(el, h, &db->htable.h[hash], hlist) {
+#else
+	hlist_for_each_entry_rcu(el, &db->htable.h[hash], hlist) {
+#endif
+		if (el && nf_ct_tuple_equal(&el->in_tuple, tuple)) {
+			if (!atomic_inc_not_zero(&el->rc))
+				goto not_found;
+			rcu_read_unlock_bh();
+			if (!iterations)
+				db->stats.hits++;
+			el->hit_counter++;
+			FP_DEBUG_DUMP_ENTRY("fpdb_get: entry was found:\n", el);
+			return el;
+		}
+		iterations++;
+		db->stats.iterations++; /* Total Iterations*/
+	}
+
+not_found:
+	rcu_read_unlock_bh();
+	FP_DEBUG_DUMP_TUPLE("fpdb_get: entry was not found:\n", tuple);
+
+	return NULL;
+}
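+
+/*
+ * Pairing sketch (hypothetical caller): every successful fpdb_get()
+ * must be balanced by fpdb_put() once the entry is no longer used:
+ *
+ *	struct fpdb_entry *el = fpdb_get(&tuple);
+ *
+ *	if (el) {
+ *		// el is protected by the reference taken above
+ *		fpdb_put(el);
+ *	}
+ */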
+
+#ifdef CONFIG_ASR_TOE
+static int fpdb_del_toe_tuple(struct fpdb_entry *el)
+{
+	struct toe_tuple_buff toe_tuple;
+	struct fp_net_device *dst, *src;
+	u32 nat_ip = 0;
+	u16 nat_port = 0;
+	u8 proto = 0, in_pkt = 0, out_pkt = 0, fwd = 0, nat = 0;
+	u8 rx_tx;
+
+	BUG_ON(!el);
+
+	if (!el->nl_flag)
+		return 0;
+
+	src = rcu_dereference_bh(el->in_dev);
+	dst = rcu_dereference_bh(el->out_dev);
+	if (!strncasecmp(src->dev->name, "ccinet", 6))
+		in_pkt = PDU_PKT;
+	else if (!strncasecmp(src->dev->name, "usbnet", 6))
+		in_pkt = USB_PKT;
+	else if (!strncasecmp(src->dev->name, "wlan", 4))
+		in_pkt = WIFI_PKT;
+	else if (!strncasecmp(src->dev->name, "eth", 3))
+		in_pkt = ETH_PKT;
+	else
+		in_pkt = AP_PKT;
+
+	if (!strncasecmp(dst->dev->name, "ccinet", 6))
+		out_pkt = PDU_PKT;
+	else if (!strncasecmp(dst->dev->name, "usbnet", 6))
+		out_pkt = USB_PKT;
+	else if (!strncasecmp(dst->dev->name, "wlan", 4))
+		out_pkt = WIFI_PKT;
+	else if (!strncasecmp(dst->dev->name, "eth", 3))
+		out_pkt = ETH_PKT;
+	else
+		out_pkt = AP_PKT;
+
+	fwd = (in_pkt != AP_PKT) && (out_pkt != AP_PKT);
+	if (fwd && (el->out_tuple.src.l3num == AF_INET)) {
+		if (in_pkt == PDU_PKT && (out_pkt == USB_PKT || out_pkt == WIFI_PKT || out_pkt == ETH_PKT)) {
+			nat = 1;
+			nat_ip = ntohl(el->out_tuple.src.u3.ip);
+			nat_port = ntohs(el->out_tuple.src.u.all);
+		} else if ((in_pkt == USB_PKT || in_pkt == WIFI_PKT || in_pkt == ETH_PKT) && out_pkt == PDU_PKT) {
+			nat = 1;
+			nat_ip = ntohl(el->out_tuple.dst.u3.ip);
+			nat_port = ntohs(el->out_tuple.dst.u.all);
+		} else
+			/* CP to WIFI / WIFI to CP: no NAT needed */
+			nat = 0;
+	}
+
+	/* rx: cp -> ap, usb, wifi */
+	if (in_pkt == PDU_PKT)
+		rx_tx = 1;
+	/* rx: ap -> usb, ap -> wifi */
+	else if ((in_pkt == AP_PKT) && (out_pkt != PDU_PKT))
+		rx_tx = 1;
+	/*
+	 * tx:
+	 * ap -> cp
+	 * usb/wifi -> ap/cp
+	 */
+	else
+		rx_tx = 0;
+
+	if (el->in_tuple.src.l3num == AF_INET6) {
+		memcpy(toe_tuple.src_ip6, el->in_tuple.src.u3.all, sizeof(toe_tuple.src_ip6));
+		memcpy(toe_tuple.dst_ip6, el->in_tuple.dst.u3.all, sizeof(toe_tuple.dst_ip6));
+		toe_tuple.ip6 = 1;
+	} else {
+		toe_tuple.src_ip = ntohl(el->in_tuple.src.u3.ip);
+		toe_tuple.dst_ip = ntohl(el->in_tuple.dst.u3.ip);
+		toe_tuple.ip6 = 0;
+		toe_tuple.nat = nat;
+		toe_tuple.nat_port = nat_port;
+		toe_tuple.nat_ip = nat_ip;
+	}
+
+	if (el->in_tuple.dst.protonum == IPPROTO_UDP)
+		proto = TOE_UDP;
+	else if (el->in_tuple.dst.protonum == IPPROTO_TCP)
+		proto = TOE_TCP;
+	else
+		proto = TOE_MAX;
+
+	toe_tuple.src_port = ntohs(el->in_tuple.src.u.all);
+	toe_tuple.dst_port = ntohs(el->in_tuple.dst.u.all);
+	toe_tuple.prot = proto;
+	toe_tuple.fwd = fwd;
+	toe_tuple.rxtx = rx_tx;
+	toe_tuple.out_pkt = out_pkt;
+
+	return toe_del_connection(&toe_tuple);
+}
+#endif
+
+void __fpdb_del(struct fpdb_entry *entry, bool hlist_del)
+{
+	BUG_ON(!entry);
+	if (entry->state != ENTRY_ALIVE)
+		return;
+
+	entry->state = ENTRY_DYING;
+
+#ifdef CONFIG_ASR_TOE
+	if (entry->nl_flag) {
+		fp_cm_genl_send_tuple(&entry->in_tuple, entry, 0, 0);
+		if (fpdb_del_toe_tuple(entry))
+			pr_debug("fpdb_del_toe_tuple failed!!!\r\n");
+		entry->nl_flag = 0;
+	}
+#endif
+
+	BUG_ON(timer_pending(&entry->guard_timer));
+	if (hlist_del)
+		hlist_del_rcu(&entry->hlist);
+
+	if (atomic_dec_and_test(&entry->rc)) {
+		/* arm the guard timer only once rc has dropped to zero,
+		 * so a held entry that cannot be freed gets reported */
+		mod_timer(&entry->guard_timer, jiffies + GUARD_TIMEOUT_SEC * HZ);
+
+		/* prevent out of order so that guard timer can be stopped */
+		mb();
+		call_rcu(&entry->rcu, fpdb_rcu_free);
+	} else {
+		pr_err("__fpdb_del fail. entry:%p, rc=%d, state=%d\n", entry,
+			atomic_read(&entry->rc), entry->state);
+	}
+}
+
+void fpdb_lock_bh(void)
+{
+	return spin_lock_bh(&db->lock);
+}
+
+void fpdb_unlock_bh(void)
+{
+	return spin_unlock_bh(&db->lock);
+}
+
+void fpdb_del(struct fpdb_entry *entry)
+{
+	spin_lock_bh(&db->lock);
+	__fpdb_del(entry, true);
+	spin_unlock_bh(&db->lock);
+}
+
+/**
+ * Replace a preexisting database entry with a newly
+ * allocated one.
+ *
+ * @attention nel must be allocated first with fpdb_alloc();
+ *            el must already be in the database.
+ * @param el  pointer to a previously added fpdb_entry
+ * @param nel  pointer to a newly allocated fpdb_entry
+ * NOTE: must be called from softirq/lock_bh context
+ */
+void fpdb_replace(struct fpdb_entry *el, struct fpdb_entry *nel)
+{
+	unsigned int hash;
+
+	BUG_ON(!el || !el->out_dev);
+	BUG_ON(!nel || !nel->out_dev);
+	BUG_ON(nel->state != ENTRY_INITIALIZED);
+
+	hash = fpdb_hash_by_src(&nel->in_tuple);
+
+	atomic_set(&nel->rc, 1);
+	nel->state = ENTRY_ALIVE;
+	nel->bucket = hash;
+
+	BUG_ON(el->bucket != nel->bucket);
+
+	db->num_entries++;
+	hlist_replace_rcu(&el->hlist, &nel->hlist);
+	__fpdb_del(el, false);
+}
+
+
+static int device_cmp(struct nf_conn *ct, void *dev)
+{
+	struct nf_conn_fastpath *fp = nfct_fastpath(ct);
+	struct fpdb_entry *orig, *reply;
+	struct net_device *net = (struct net_device *)dev;
+
+	if (!fp)
+		return 0;
+
+	orig = fp->fpd_el[IP_CT_DIR_ORIGINAL];
+	reply = fp->fpd_el[IP_CT_DIR_REPLY];
+
+	if (orig && (fpdev_cmp_if(orig->in_dev, net) ||
+		     fpdev_cmp_if(orig->out_dev, net)))
+		return 1;
+	if (reply && (fpdev_cmp_if(reply->in_dev, net) ||
+		      fpdev_cmp_if(reply->out_dev, net)))
+		return 1;
+
+	return 0;
+}
+
+static inline bool
+tuple_cmp_port(const struct nf_conntrack_tuple *t, unsigned int port)
+{
+	return (ntohs(t->dst.u.all) == port || ntohs(t->src.u.all) == port);
+}
+
+static int port_cmp(struct nf_conn *ct, void *ptr)
+{
+	struct nf_conn_fastpath *fp = nfct_fastpath(ct);
+	struct fpdb_entry *orig, *reply;
+	unsigned int port = (unsigned int)(unsigned long)ptr;
+
+	if (!fp)
+		return 0;
+
+	orig = fp->fpd_el[IP_CT_DIR_ORIGINAL];
+	reply = fp->fpd_el[IP_CT_DIR_REPLY];
+
+	if (orig && (tuple_cmp_port(&orig->in_tuple, port) ||
+		     tuple_cmp_port(&orig->out_tuple, port)))
+		return 1;
+	if (reply && (tuple_cmp_port(&reply->in_tuple, port) ||
+		      tuple_cmp_port(&reply->out_tuple, port)))
+		return 1;
+
+	return 0;
+}
+
+/* kill all fastpath related conntracks */
+static int nf_fp_remove(struct nf_conn *ct, void *data)
+{
+	return test_bit(IPS_FASTPATH_BIT, &ct->status);
+}
+
+int fpdb_del_block_entry_by_dev(struct fpdb_entry *el, void *data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct nf_conn_fastpath *ct_fp;
+
+	if (fpdev_cmp_if(el->in_dev, dev) ||
+	    fpdev_cmp_if(el->out_dev, dev)) {
+
+		spin_lock_bh(&db->lock);
+		ct_fp = nfct_fastpath(el->ct);
+		if (ct_fp) {
+			if (ct_fp->fpd_el[el->dir] == NULL) {
+				spin_unlock_bh(&db->lock);
+				return 0;
+			}
+
+			ct_fp->fpd_el[el->dir] = NULL;
+		}
+		spin_unlock_bh(&db->lock);
+
+		fpdb_del(el);
+		printk(KERN_DEBUG "delete a block entry related to %s\n", dev->name);
+	}
+
+	return 0;
+}
+
+static int nf_fpdb_del(struct nf_conn *ct, void *del)
+{
+	struct nf_conn_fastpath *fp = nfct_fastpath(ct);
+	struct fpdb_entry *orig, *reply;
+
+	if (!fp)
+		return 0;
+
+	orig = fp->fpd_el[IP_CT_DIR_ORIGINAL];
+	reply = fp->fpd_el[IP_CT_DIR_REPLY];
+
+	if (orig && orig == (struct fpdb_entry *)del) {
+		orig->tstamp = 0;
+		return 1;
+	}
+
+	if (reply && reply == (struct fpdb_entry *)del) {
+		reply->tstamp = 0;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int fpdb_find_least_used_entry(struct fpdb_entry *el, void *data)
+{
+	struct fpdb_entry **p_el = (struct fpdb_entry **)data;
+
+	if (!*p_el)
+		*p_el = el;
+	else if (el->tstamp && time_before(el->tstamp, (*p_el)->tstamp))
+		*p_el = el;
+
+	return 0;
+}
+
+void fpdb_del_least_used_entry(int max_num)
+{
+	struct fpdb_entry *el = NULL;
+
+	fpdb_iterate(fpdb_find_least_used_entry, &el);
+
+	if (!el)
+		return;
+
+	pr_info_ratelimited("%s: el=%px (%d entries, max=%d)\n",
+		__func__, el, db->num_entries, max_num);
+	nf_ct_iterate_cleanup(&nf_fpdb_del, (void *)el, 0, 0);
+}
+
+/**
+ * Remove all fastpath related connections with the specified network device
+ *
+ * caller should have rtnl locked
+ *
+ * @param dev
+ */
+void fpdb_del_by_dev(struct net_device *dev)
+{
+	nf_ct_iterate_cleanup(&device_cmp, (void *)dev, 0, 0);
+
+	printk(KERN_DEBUG "All entries related to %s deleted\n", dev->name);
+}
+
+/**
+ * Remove all fastpath related connections with the specified port
+ * 
+ * caller should have rtnl locked
+ * 
+ * @param port
+ */
+void fpdb_del_by_port(unsigned int port)
+{
+	nf_ct_iterate_cleanup(&port_cmp, (void *)(unsigned long)port, 0, 0);
+
+	pr_debug("All entries with port=%d deleted\n", port);
+}
+
+/**
+ * flush the entire database by cleaning all fastpath related
+ * conntracks
+ * 
+ * MUST BE CALLED IN PROCESS CONTEXT
+ */
+void fpdb_flush(void)
+{
+	nf_ct_iterate_cleanup(&nf_fp_remove, 0, 0, 0);
+
+	pr_debug("All entries flushed\n");
+}
+
+/**
+ * Iterate through all fpdb entries
+ * MUST BE CALLED IN PROCESS CONTEXT
+ * 
+ * @param iter   callback function called for every entry.
+ *               If it returns non-zero, iteration stops for the
+ *               current bucket.
+ * @param data   private data to be passed to the iter callback
+ */
+void fpdb_iterate(int (*iter)(struct fpdb_entry *e, void *data), void *data)
+{
+	int i;
+	struct fpdb_entry *e;
+
+	for (i = 0; i < db->htable.size; i++) {
+		rcu_read_lock_bh();
+		hlist_for_each_entry_rcu(e, &db->htable.h[i], hlist) {
+			if (iter(e, data))
+				break;
+		}
+		rcu_read_unlock_bh();
+	}
+}
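+
+/*
+ * Callback sketch (hypothetical): counting entries with
+ * fpdb_iterate(); returning 0 keeps the walk going:
+ *
+ *	static int count_entry(struct fpdb_entry *e, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	int n = 0;
+ *	fpdb_iterate(count_entry, &n);
+ */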
+
+/**
+ * Add the current entry state to the entry's trace buffer when
+ * debug_level mask contains DBG_TRACE_LOG
+ *
+ * @param entry - entry to log
+ * @param tcph - NULL for UDP
+ */
+void fpdb_trace(struct fpdb_entry *entry, struct tcphdr *tcph)
+{
+	if (debug_level & DBG_TRACE_LOG) {
+		struct fpdb_trace *trace = kzalloc(sizeof(struct fpdb_trace), GFP_ATOMIC);
+
+		BUG_ON(!entry);
+
+		trace->timeout = jiffies_to_msecs(entry->ct->timeout - jiffies) / 1000U;
+		trace->ct_status = entry->ct->status;
+		trace->hit_counter = entry->hit_counter;
+
+		if (tcph) {
+			trace->tcp_state = entry->ct->proto.tcp.state;
+			trace->tcph = *tcph;
+		}
+
+		list_add(&trace->list, &entry->debug.trace.list);
+		if (++entry->debug.trace.sz > 5) {
+			/* TODO - change to configurable param */
+			trace = list_entry(entry->debug.trace.list.prev, struct fpdb_trace, list);
+			list_del(entry->debug.trace.list.prev);
+			kfree(trace);
+			entry->debug.trace.sz--;
+		}
+	}
+}
+
+/****************************************************************************
+ *			Fast Path Database private
+ ****************************************************************************/
+
+/* SYS FS and PROC FS */
+
+static void fpdb_get_stats(void)
+{
+	int i, count, max = 0;
+	struct fpdb_entry *el;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	struct hlist_node *h;
+#endif
+
+	memset(db->stats.hist, 0, sizeof(db->stats.hist));
+	db->stats.num_occupied = 0;
+
+	for (i = 0; i < db->htable.size; i++) {
+		count = 0;
+
+		rcu_read_lock_bh();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+		hlist_for_each_entry_rcu(el, h, &db->htable.h[i], hlist)
+#else
+		hlist_for_each_entry_rcu(el, &db->htable.h[i], hlist)
+#endif
+			count++;
+		rcu_read_unlock_bh();
+
+		if (count)
+			db->stats.num_occupied++;
+
+		if (count < HISTOGRAM_SIZE) {
+			db->stats.hist[count].buckets++;
+			db->stats.hist[count].entries += count;
+		} else {
+			db->stats.hist[HISTOGRAM_SIZE].buckets++;
+			db->stats.hist[HISTOGRAM_SIZE].entries += count;
+		}
+
+		max = (count > max) ? count : max;
+	}
+
+	db->stats.largest_bucket = max;
+}
+
+static ssize_t fpdb_sysfs_flush(struct fastpath_module *m, const char *buf, size_t count)
+{
+	struct net_device *dev;
+
+	if (count > 2) {
+		char *str = kstrndup(buf, count, GFP_KERNEL);
+		char *nl;
+
+		if (!str)
+			return -ENOMEM;
+		nl = strchr(str, '\n');
+		if (nl)
+			*nl = '\0';
+		dev = dev_get_by_name(&init_net, str);
+		kfree(str);
+
+		if (dev) {
+			fpdb_del_by_dev(dev);
+			dev_put(dev);
+			return count;
+		}
+	}
+
+	fpdb_flush();
+	return count;
+}
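+
+/*
+ * Usage sketch (the attribute is registered below as "flush"; the
+ * sysfs directory name comes from this module's probe, assumed here
+ * to be "fp_database"):
+ *
+ *   echo eth0 > /sys/kernel/fastpath/fp_database/flush  # per-device
+ *   echo 1 > /sys/kernel/fastpath/fp_database/flush     # flush all
+ */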
+
+static ssize_t fpdb_sysfs_stats_show(struct fastpath_module *m, char *buf)
+{
+	int len, i;
+	u32 sum_pct = 0;
+
+	fpdb_get_stats();
+
+	len = sprintf(buf, "Fast Path Database (HASH) statistics:\n");
+	len += sprintf(buf + len, "Max number of entries: %d ",
+				db->stats.max_entries);
+	len += sprintf(buf + len, "Total lookups: %d, Total hits: %d, "
+		       "hit rate %d%%\n", db->stats.lookups, db->stats.hits,
+		       (100 * db->stats.hits) / (db->stats.lookups ?
+						 db->stats.lookups : 1));
+	len += sprintf(buf + len, "Database Size is %d Buckets\n",
+		       db->htable.size);
+	len += sprintf(buf + len, "Number of occupied buckets: %d\n",
+		       db->stats.num_occupied);
+	len += sprintf(buf + len, "Database contains %d entries\n",
+		       db->num_entries);
+	len += sprintf(buf + len, "Largest bucket contains %d entries\n",
+		       db->stats.largest_bucket);
+	len += sprintf(buf + len, "Load Factor is %d (%d/%d)\n",
+		       db->num_entries /
+		       (db->htable.size ? db->htable.size : 1),
+		       db->num_entries, db->htable.size);
+	len += sprintf(buf + len, "find_entry() iterations/lookups: %d/%d\n",
+		       db->stats.iterations, db->stats.lookups);
+	len += sprintf(buf + len, "Histogram:\n");
+	len += sprintf(buf + len, "Size   buckets   entries   sum-pct\n");
+	for (i = 0; i < HISTOGRAM_SIZE; i++) {
+		if (sum_pct < 100)
+			sum_pct += (100 * db->stats.hist[i].entries) /
+				   (db->num_entries ?
+				    db->num_entries : 1);
+		else
+			sum_pct = 100;
+
+		len += sprintf(buf + len, "%4d%10d%10d%10d\n", i,
+			       db->stats.hist[i].buckets,
+			       db->stats.hist[i].entries, sum_pct);
+	}
+	len += sprintf(buf + len, ">%3d%10d%10d%10d\n",	i - 1,
+		       db->stats.hist[i].buckets,
+		       db->stats.hist[i].entries, 100);
+
+	return len;
+}
+
+static ssize_t fpdb_sysfs_stats_clear(struct fastpath_module *m, const char *buf,
+				   size_t count)
+{
+	pr_debug("reset stats...\n");
+	memset(&db->stats, 0, sizeof(db->stats));
+	return count;
+}
+
+static unsigned int dbg_hash;
+
+static ssize_t fpdb_sysfs_entry_debug_select(struct fastpath_module *m, const char *buf,
+				   size_t count)
+{
+	sscanf(buf, "%u", &dbg_hash);
+	return count;
+}
+
+static ssize_t fpdb_sysfs_entry_debug_show(struct fastpath_module *m, char *buf)
+{
+	struct fpdb_entry *el;
+	int i = 0, len;
+	struct fpdb_trace *itr;
+	struct nf_conn_fastpath *fp_ext;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	struct hlist_node *h;
+#endif
+
+	if (dbg_hash >= db->htable.size)
+		return sprintf(buf, "invalid hash (%d)\n", dbg_hash);
+
+	len = sprintf(buf, "debug info for bucket%u:\n", dbg_hash);
+	rcu_read_lock_bh();
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	hlist_for_each_entry_rcu(el, h, &db->htable.h[dbg_hash], hlist) {
+#else
+	hlist_for_each_entry_rcu(el, &db->htable.h[dbg_hash], hlist) {
+#endif
+		len += __fpdb_dump_entry(buf+len, el);
+		fp_ext = nf_ct_ext_find(el->ct, NF_CT_EXT_FASTPATH);
+		BUG_ON(!fp_ext);
+		len += sprintf(buf+len, "fastpath_ext orig:=%p reply=%p\n",
+			       fp_ext->fpd_el[IP_CT_DIR_ORIGINAL],
+			       fp_ext->fpd_el[IP_CT_DIR_REPLY]);
+		if (el->in_tuple.dst.protonum == IPPROTO_UDP)
+			continue;
+		len += sprintf(buf+len, "%d: trace:\n", i++);
+		len += sprintf(buf+len, "hits timeout tcp_state tcp_flags ct_status\n");
+		list_for_each_entry(itr, &el->debug.trace.list, list)
+			len += sprintf(buf+len, "%d %d %s %c%c%c%c%c%c %lu\n",
+				       itr->hit_counter, itr->timeout,
+				       tcp_conntrack_names[itr->tcp_state],
+				       itr->tcph.urg ? 'U' : '-',
+				       itr->tcph.ack ? 'A' : '-',
+				       itr->tcph.psh ? 'P' : '-',
+				       itr->tcph.rst ? 'R' : '-',
+				       itr->tcph.syn ? 'S' : '-',
+				       itr->tcph.fin ? 'F' : '-',
+				       itr->ct_status);
+	}
+	rcu_read_unlock_bh();
+
+	return len;
+}
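+
+/*
+ * The "bucket" attribute pairs a write with a read: write a bucket index
+ * to select it, then read back the per-entry trace (paths assumed as above):
+ *
+ *	echo 42 > .../fp_database/bucket
+ *	cat .../fp_database/bucket
+ */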
+
+
+static FP_ATTR(stats, S_IRUGO|S_IWUSR, fpdb_sysfs_stats_show, fpdb_sysfs_stats_clear);
+static FP_ATTR(flush, S_IWUSR, NULL, fpdb_sysfs_flush);
+static FP_ATTR(bucket, S_IRUGO|S_IWUSR, fpdb_sysfs_entry_debug_show, fpdb_sysfs_entry_debug_select);
+
+static struct attribute *fp_database_attrs[] = {
+	&fp_attr_stats.attr,
+	&fp_attr_flush.attr,
+	&fp_attr_bucket.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+#ifdef CONFIG_PROC_FS
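+/*
+ * True while the first entry of a /proc read is being shown, so that
+ * fpdb_seq_show() prints its header exactly once per read; note this is a
+ * single static flag shared by all concurrent readers.
+ */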
+static bool first;
+struct fpdb_iter_state {
+	struct seq_net_private p;
+	unsigned int bucket;
+};
+
+static struct hlist_node *fpdb_entries_get_first(struct seq_file *seq)
+{
+	struct fpdb_iter_state *st = seq->private;
+	struct hlist_node *n;
+
+	for (st->bucket = 0; st->bucket < db->htable.size; st->bucket++) {
+		n = rcu_dereference(db->htable.h[st->bucket].first);
+		if (n) {
+			first = true;
+			return n;
+		}
+	}
+	return NULL;
+}
+
+static struct hlist_node *fpdb_entries_get_next(struct seq_file *seq,
+						struct hlist_node *head)
+{
+	struct fpdb_iter_state *st = seq->private;
+
+	first = false;
+	head = rcu_dereference(head->next);
+
+	while (head == NULL) {
+		if (++st->bucket >= db->htable.size)
+			return NULL;
+		head = rcu_dereference(db->htable.h[st->bucket].first);
+	}
+	return head;
+}
+
+static struct hlist_node *fpdb_entries_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct hlist_node *head = fpdb_entries_get_first(seq);
+
+	if (head)
+		while (pos && (head = fpdb_entries_get_next(seq, head)))
+			pos--;
+	return pos ? NULL : head;
+}
+
+static void *fpdb_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(RCU)
+{
+	rcu_read_lock_bh();
+	return fpdb_entries_get_idx(seq, *pos);
+}
+
+static void *fpdb_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return fpdb_entries_get_next(seq, v);
+}
+
+static void fpdb_seq_stop(struct seq_file *seq, void *v)
+	__releases(RCU)
+{
+	rcu_read_unlock_bh();
+}
+
+static int fpdb_seq_show(struct seq_file *s, void *v)
+{
+	struct hlist_node *n = v;
+	struct fpdb_entry *el;
+	char in[256], out[256];
+	unsigned int state, use;
+
+	el = hlist_entry(n, struct fpdb_entry, hlist);
+	if (atomic_inc_not_zero(&el->rc)) {
+		if (first) {
+			seq_printf(s, "l2  l3  l4  timeout\thash\thits\tstate  in_dev  out_dev  tuple_in  tuple_out ct block use refcnt\n");
+		}
+		__fp_dump_tuple(in, &el->in_tuple, 0);
+		__fp_dump_tuple(out, &el->out_tuple, 0);
+		state = el->ct->proto.tcp.state;
+		use = atomic_read(&el->ct->ct_general.use);
+		seq_printf(s, "%s  %s  %s  %d\t%d\t%d\t%s  %s  %s  %s  %s  %p  %d  %d  %d"
+#ifdef CONFIG_ASR_TOE
+			" %dKbps"
+#endif
+			"\n",
+				  el->hh.hh_len ? "eth" : "NA",
+				  el->in_tuple.src.l3num == AF_INET6 ?
+				  "ipv6" : "ipv4",
+				  el->in_tuple.dst.protonum == IPPROTO_UDP ?
+				  "udp" : "tcp",
+				  jiffies_to_msecs(el->ct->timeout - jiffies) / 1000U,
+				  el->bucket, el->hit_counter,
+				  el->in_tuple.dst.protonum == IPPROTO_UDP ?
+				  "N/A" : tcp_conntrack_names[state],
+				  el->in_dev->dev->name,
+				  el->out_dev->dev->name,
+				  in, out, el->ct, el->block, use, atomic_read(&el->rc)
+#ifdef CONFIG_ASR_TOE
+				  , el->speed
+#endif
+				  );
+		fpdb_put(el);
+	}
+	return 0;
+}
+
+static const struct seq_operations fpdb_seq_ops = {
+	.start = fpdb_seq_start,
+	.next = fpdb_seq_next,
+	.stop = fpdb_seq_stop,
+	.show = fpdb_seq_show
+};
+
+#endif /* CONFIG_PROC_FS */
+
+static int fpdb_net_init(struct net *net)
+{
+	if (!proc_create_net("fastpath", 0440, net->proc_net, &fpdb_seq_ops,
+			sizeof(struct fpdb_iter_state)))
+
+		return -ENOMEM;
+	return 0;
+}
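+
+/* Once registered, the table is readable with e.g. "cat /proc/net/fastpath"
+ * (mode 0440, so normally as root). */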
+
+static void fpdb_net_exit(struct net *net)
+{
+	remove_proc_entry("fastpath", net->proc_net);
+}
+
+static struct pernet_operations fpdb_net_ops = {
+	.init = fpdb_net_init,
+	.exit = fpdb_net_exit,
+};
+
+static void fp_database_release(struct kobject *kobj)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+	int wait_time = 200;
+
+	fpdb_flush();
+	do {
+		/* wait all fpdb freed, then call kmem_cache_destroy */
+		synchronize_rcu();
+		msleep(10);
+		if (--wait_time <= 0)
+			break;
+	} while (db->num_entries);
+
+	pr_info("%d fpdb entry left\n", db->num_entries);
+	nf_ct_extend_unregister(db->nfct_ext);
+	unregister_pernet_subsys(&fpdb_net_ops);
+	fpdb_free_hashtable(&db->htable);
+	kmem_cache_destroy(db->db_cache);
+#ifdef FP_USE_SRAM_POOL_OPT
+	sram_pool_free((unsigned long)db, sizeof(struct fp_database));
+#else
+	kfree(db);
+#endif
+	kfree(module);
+
+	pr_debug("fp_database released\n");
+}
+
+static struct kobj_type ktype_database = {
+	.sysfs_ops	= &fp_sysfs_ops,
+	.default_attrs	= fp_database_attrs,
+	.release	= fp_database_release,
+};
+
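+/*
+ * Conntrack extension destructor, called by the conntrack core when a
+ * tracked connection is destroyed: detach and delete both direction
+ * entries under db->lock so teardown cannot race with classification.
+ */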
+static void fpdb_destroy_ext(struct nf_conn *ct)
+{
+	struct nf_conn_fastpath *ct_fp;
+	struct fpdb_entry *orig, *reply;
+	BUG_ON(!ct);
+
+	spin_lock_bh(&db->lock);
+	ct_fp = nfct_fastpath(ct);
+	if (ct_fp) {
+		orig = ct_fp->fpd_el[IP_CT_DIR_ORIGINAL];
+		reply = ct_fp->fpd_el[IP_CT_DIR_REPLY];
+	} else {
+		orig = NULL;
+		reply = NULL;
+	}
+
+	if (orig == NULL && reply == NULL) {
+		spin_unlock_bh(&db->lock);
+		return;
+	}
+
+	ct_fp->fpd_el[IP_CT_DIR_ORIGINAL] = NULL;
+	ct_fp->fpd_el[IP_CT_DIR_REPLY] = NULL;
+	if (orig) {
+		FP_DEBUG_DUMP_ENTRY("Delete orig entry:\n", orig);
+		__fpdb_del(orig, true);
+	}
+
+	if (reply) {
+		FP_DEBUG_DUMP_ENTRY("Delete reply entry:\n", reply);
+		__fpdb_del(reply, true);
+	}
+	spin_unlock_bh(&db->lock);
+}
+
+static struct nf_ct_ext_type fpdb_ct_extend = {
+	.len = sizeof(struct nf_conn_fastpath),
+	.align = __alignof__(struct nf_conn_fastpath),
+	.id = NF_CT_EXT_FASTPATH,
+	.destroy = fpdb_destroy_ext,
+};
+
+static int fp_database_probe(struct fastpath_module *module)
+{
+	struct fp_database *priv;
+	int ret;
+
+#ifdef FP_USE_SRAM_POOL_OPT
+	priv = (struct fp_database *)sram_pool_alloc(sizeof(struct fp_database));
+#else
+	priv = kzalloc(sizeof(struct fp_database), GFP_KERNEL);
+#endif
+	if (!priv) {
+		pr_err("no memory\n");
+		return -ENOMEM;
+	}
+	spin_lock_init(&priv->lock);
+	get_random_bytes(&hash_rnd, sizeof(hash_rnd));
+
+	priv->db_cache = kmem_cache_create("fpdb_entry",
+			sizeof(struct fpdb_entry), 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!priv->db_cache) {
+		pr_err("kmem_cache_create fpdb_entry failed\n");
+		ret = -ENOMEM;
+		goto kfree_priv;
+	}
+
+	ret = fpdb_alloc_hashtable(&priv->htable);
+	if (ret < 0) {
+		pr_err("fpdb_alloc_hashtable failed (ret=%d)\n", ret);
+		goto kfree_cache;
+	}
+
+	ret = register_pernet_subsys(&fpdb_net_ops);
+	if (ret < 0) {
+		pr_err("cannot register pernet operations (ret=%d)\n", ret);
+		goto free_hashtable;
+	}
+
+	priv->nfct_ext = &fpdb_ct_extend;
+	ret = nf_ct_extend_register(priv->nfct_ext);
+	if (ret < 0) {
+		pr_err("nf_ct_extend_register failed (%d)\n", ret);
+		goto unreg_pernet;
+	}
+
+	db = module->priv = priv;
+	snprintf(module->name, sizeof(module->name), "fp_database");
+
+	kobject_init(&module->kobj, &ktype_database);
+	ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+	if (ret < 0) {
+		pr_err("kobject_add failed (%d)\n", ret);
+		/* the kobject release callback performs the full teardown */
+		kobject_put(&module->kobj);
+		return ret;
+	}
+	kobject_uevent(&module->kobj, KOBJ_ADD);
+
+	pr_debug("fp_database probed\n");
+	return 0;
+
+unreg_pernet:
+	unregister_pernet_subsys(&fpdb_net_ops);
+free_hashtable:
+	fpdb_free_hashtable(&priv->htable);
+kfree_cache:
+	kmem_cache_destroy(priv->db_cache);
+kfree_priv:
+#ifdef FP_USE_SRAM_POOL_OPT
+	sram_pool_free((unsigned long)priv, sizeof(struct fp_database));
+#else
+	kfree(priv);
+#endif
+	return ret;
+}
+
+static int fp_database_remove(struct fastpath_module *module)
+{
+	kobject_put(&module->kobj);
+
+	pr_debug("fp_database removed\n");
+	return 0;
+}
+
+struct fastpath_module_ops fp_database_ops = {
+	.probe = fp_database_probe,
+	.remove = fp_database_remove,
+};
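+
+/*
+ * A minimal sketch of how the fastpath core is expected to drive these ops
+ * (the core side is not part of this file, so the calling convention shown
+ * here is an assumption):
+ *
+ *	struct fastpath_module *m = kzalloc(sizeof(*m), GFP_KERNEL);
+ *
+ *	m->fastpath = core;             // parent providing the sysfs kobject
+ *	ret = fp_database_ops.probe(m);
+ *	...
+ *	fp_database_ops.remove(m);      // the final kobject_put() frees m
+ */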
+
diff --git a/package/kernel/mfp/files/fp_device.c b/package/kernel/mfp/files/fp_device.c
new file mode 100644
index 0000000..f7d4ad8
--- /dev/null
+++ b/package/kernel/mfp/files/fp_device.c
@@ -0,0 +1,691 @@
+/*
+ *	Fastpath Devices
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "mfp" " device:%s:%d: " fmt, __func__, __LINE__
+
+#include <net/ipv6.h>
+#include <linux/inet.h>
+#include "fp_common.h"
+#include "fp_device.h"
+#include "fp_core.h"
+#include "fp_ndisc.h"
+
+static struct fastpath_module *fp_device;
+
+#define STATS_TITLE_FMT \
+	"%-13.13s:              total              Slow                Fast\n"
+#define STATS_DATA_FMT \
+	"   %-10.10s:       %10llu       %10llu          %10lu\n"
+
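+/*
+ * Slow-path-only share of a counter: the device totals include fastpath
+ * traffic, so subtract the fastpath counter, clamping at zero in case the
+ * two counters are sampled out of step.
+ */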
+static inline unsigned long long stats_diff(unsigned long long slow, unsigned long fast)
+{
+	return (slow < fast) ? 0 : slow - fast;
+}
+
+static int
+add_stats_to_buff(char *buf, struct fp_net_device *fdev, ssize_t max_size)
+{
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats;
+	struct fp_net_device_stats *stats_fast;
+	static const char *title_fmt = STATS_TITLE_FMT;
+	static const char *data_fmt = STATS_DATA_FMT;
+	int len;
+
+	stats = dev_get_stats(fdev->dev, &temp);
+
+	stats_fast = &fdev->stats;
+
+	len = scnprintf(buf, max_size, title_fmt, fdev->dev->name);
+
+	len += scnprintf(buf + len, max_size - len, data_fmt, "queue_stopped",
+			   0llu, 0llu, stats_fast->queue_stopped);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "rx_packets",
+			   stats->rx_packets, stats_diff(stats->rx_packets, stats_fast->rx_packets) ,stats_fast->rx_packets);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "rx_bytes",
+			   stats->rx_bytes, stats_diff(stats->rx_bytes, stats_fast->rx_bytes), stats_fast->rx_bytes);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "rx_errors",
+			   stats->rx_errors, stats_diff(stats->rx_errors, stats_fast->rx_errors), stats_fast->rx_errors);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "rx_dropped",
+			   stats->rx_dropped, stats_diff(stats->rx_dropped, stats_fast->rx_dropped), stats_fast->rx_dropped);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "tx_packets",
+			   stats->tx_packets, stats_diff(stats->tx_packets, stats_fast->tx_packets), stats_fast->tx_packets);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "tx_bytes",
+			   stats->tx_bytes, stats_diff(stats->tx_bytes, stats_fast->tx_bytes), stats_fast->tx_bytes);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "tx_errors",
+			   stats->tx_errors, stats_diff(stats->tx_errors, stats_fast->tx_errors), stats_fast->tx_errors);
+	len += scnprintf(buf + len, max_size - len, data_fmt, "tx_dropped",
+			   stats->tx_dropped, stats_diff(stats->tx_dropped, stats_fast->tx_dropped), stats_fast->tx_dropped);
+	return len;
+}
+
+static int
+add_status_to_buff(char *buf, struct fp_net_device *fdev, ssize_t max_size)
+{
+	return scnprintf(buf, max_size, "%16s%8s%11s%9d%9d%9s\n",
+			fdev->dev->name,
+			netif_running(fdev->dev) ? "Up" : "Down",
+			fdev->forward ? "enabled" : "disabled",
+			atomic_read(&fdev->refcnt),
+			netdev_refcnt_read(fdev->dev),
+			fdev->br ? fdev->br->name : "NA");
+}
+
+static inline bool ip6addr_is_empty(struct in6_addr *addr)
+{
+	return !addr->in6_u.u6_addr32[0] &&
+	       !addr->in6_u.u6_addr32[1] &&
+	       !addr->in6_u.u6_addr32[2] &&
+	       !addr->in6_u.u6_addr32[3];
+}
+
+static ssize_t fdev_forward_show(struct fp_net_device *fdev, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n", fdev->forward ? "Enabled" : "Disabled");
+}
+
+static ssize_t fdev_forward_store(struct fp_net_device *fdev,
+				   const char *buf, size_t count)
+{
+	unsigned int forward;
+
+	if (sscanf(buf, "%u", &forward) != 1)
+		return -EINVAL;
+
+	fdev->forward = (bool)forward;
+
+	return count;
+}
+
+/**
+ * show statistics
+ */
+static ssize_t fdev_stats_show(struct fp_net_device *fdev, char *buf)
+{
+	/* add_stats_to_buff() fetches the device stats itself */
+	return add_stats_to_buff(buf, fdev, PAGE_SIZE - 1);
+}
+
+/**
+ * clear statistics
+ * 0 - clear fast stats only
+ * 1 - clear slow & fast stats
+ */
+static ssize_t fdev_stats_store(struct fp_net_device *fdev,
+				   const char *buf, size_t count)
+{
+	struct net_device_stats *stats_slow = &fdev->dev->stats;
+	struct fp_net_device_stats *stats_fast = &fdev->stats;
+	unsigned int op;
+
+	if (sscanf(buf, "%u", &op) != 1 || op > 1)
+		return -EINVAL;
+
+	if (fdev->dev->netdev_ops && fdev->dev->netdev_ops->ndo_get_stats)
+		stats_slow = fdev->dev->netdev_ops->ndo_get_stats(fdev->dev);
+
+	memset(stats_fast, 0, sizeof(struct fp_net_device_stats));
+	if (op)
+		memset(stats_slow, 0, sizeof(struct net_device_stats));
+
+	return count;
+}
+
+/**
+ * show status
+ */
+static ssize_t fdev_status_show(struct fp_net_device *fdev, char *buf)
+{
+	int len;
+
+	len = scnprintf(buf, PAGE_SIZE, "          device   state    forward   refcnt   dev_ref   bridge\n");
+	return len + add_status_to_buff(buf + len, fdev, PAGE_SIZE - len - 1);
+}
+
+static ssize_t fpdev_prefixlen_store(struct fp_net_device *fpdev,
+					const char *buf, size_t count)
+{
+	int pref;
+
+	if (sscanf(buf, "%d", &pref) != 1)
+		return -EINVAL;
+
+	fpdev->prefixlen = pref;
+
+	return count;
+}
+
+static ssize_t fpdev_prefixlen_show(struct fp_net_device *fpdev, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", fpdev->prefixlen);
+}
+
+static ssize_t fpdev_ll6addr_store(struct fp_net_device *fpdev,
+					const char *buf, size_t count)
+{
+	in6_pton(buf, -1, (u8 *)&fpdev->ll6addr.s6_addr, -1, NULL);
+
+	if (ip6addr_is_empty(&fpdev->ll6addr))
+		fpdev_clear_ll6(fpdev);
+	else
+		fpdev_set_ll6(fpdev);
+
+	memset(&fpdev->gb6addr, 0, sizeof(struct in6_addr));
+	fpdev->prefixlen = 0;
+	fpdev->mtu = 0;
+	fpdev_clear_gb6(fpdev);
+	fpdev_clear_mtu(fpdev);
+
+	return count;
+}
+
+static ssize_t fpdev_ll6addr_show(struct fp_net_device *fpdev, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%pI6c\n", &fpdev->ll6addr);
+}
+
+static ssize_t fpdev_gb6addr_store(struct fp_net_device *fpdev,
+					const char *buf, size_t count)
+{
+	in6_pton(buf, -1, (u8 *)&fpdev->gb6addr.s6_addr, -1, NULL);
+
+	fpdev_set_gb6(fpdev);
+	return count;
+}
+
+static ssize_t fpdev_gb6addr_show(struct fp_net_device *fpdev, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%pI6c\n", &fpdev->gb6addr);
+}
+
+static ssize_t fpdev_mtu_store(struct fp_net_device *fpdev,
+					const char *buf, size_t count)
+{
+	u32 mtu;
+
+	if (sscanf(buf, "%u", &mtu) != 1)
+		return -EINVAL;
+
+	fpdev->mtu = mtu;
+
+	return count;
+}
+
+
+static ssize_t fpdev_mtu_show(struct fp_net_device *fpdev, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", fpdev->mtu);
+}
+
+struct fp_dev_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct fp_net_device *, char *);
+	ssize_t (*store)(struct fp_net_device *, const char *, size_t count);
+};
+
+#define FPDEV_ATTR(_name, _mode, _show, _store) \
+	struct fp_dev_attr fp_dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+#define to_fpdev(fpdev) container_of(fpdev, struct fp_net_device, kobj)
+#define to_attr(a) container_of(a, struct fp_dev_attr, attr)
+
+static FPDEV_ATTR(forward, S_IRUGO|S_IWUSR, fdev_forward_show, fdev_forward_store);
+static FPDEV_ATTR(statistics, S_IRUGO|S_IWUSR, fdev_stats_show, fdev_stats_store);
+static FPDEV_ATTR(status, S_IRUGO, fdev_status_show, NULL);
+static FPDEV_ATTR(ll6addr, S_IRUGO|S_IWUSR, fpdev_ll6addr_show,
+		   fpdev_ll6addr_store);
+static FPDEV_ATTR(gb6addr, S_IRUGO|S_IWUSR, fpdev_gb6addr_show,
+		   fpdev_gb6addr_store);
+static FPDEV_ATTR(prefixlen, S_IRUGO|S_IWUSR, fpdev_prefixlen_show,
+		   fpdev_prefixlen_store);
+static FPDEV_ATTR(mtu, S_IRUGO|S_IWUSR, fpdev_mtu_show,
+		   fpdev_mtu_store);
+
+static struct attribute *fpdev_default_attrs[] = {
+	&fp_dev_attr_forward.attr,
+	&fp_dev_attr_statistics.attr,
+	&fp_dev_attr_status.attr,
+	&fp_dev_attr_ll6addr.attr,
+	&fp_dev_attr_gb6addr.attr,
+	&fp_dev_attr_prefixlen.attr,
+	&fp_dev_attr_mtu.attr,
+	NULL
+};
+
+static ssize_t fpdev_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct fp_net_device *fdev = to_fpdev(kobj);
+	struct fp_dev_attr *fattr = to_attr(attr);
+
+	if (!fdev || !fattr || !fattr->show)
+		return -EINVAL;
+
+	return fattr->show(fdev, buf);
+}
+
+static ssize_t fpdev_store(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count)
+{
+	struct fp_net_device *fdev = to_fpdev(kobj);
+	struct fp_dev_attr *fattr = to_attr(attr);
+
+	if (!fdev || !fattr || !fattr->store)
+		return -EINVAL;
+
+	return fattr->store(fdev, buf, count);
+}
+
+static const struct sysfs_ops fpdev_sysfs_ops = {
+	.show = fpdev_show,
+	.store = fpdev_store,
+};
+
+void destroy_fpdev(struct work_struct *w)
+{
+	struct fp_dev_work *work;
+	struct fp_net_device *fpdev;
+	struct fp_dev_list *fpdl;
+
+	work = container_of(w, struct fp_dev_work, work.work);
+	BUG_ON(!work);
+
+	fpdev = work->fpdev;
+	fpdl = work->fpdl;
+
+	pr_err("device (%s) destroyed\n", fpdev->dev->name);
+
+	rtnl_lock();
+	dev_put(fpdev->dev);
+	rtnl_unlock();
+
+	kfree(fpdev);
+	atomic_dec(&fpdl->dev_count);
+	wake_up(&fpdl->wq);
+
+	list_del(&work->list);
+	kfree(work);
+}
+
+void destroy_fpdev_rcu(struct rcu_head *rcu)
+{
+	struct fp_dev_work *work;
+	struct fp_net_device *fpdev =
+		container_of(rcu, struct fp_net_device, rcu);
+	struct fp_dev_list *fpdl = fp_device->priv;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+
+	work->fpdev = fpdev;
+	work->fpdl = fpdl;
+
+	INIT_LIST_HEAD(&work->list);
+	INIT_DELAYED_WORK(&work->work, destroy_fpdev);
+	queue_delayed_work(fpdl->dev_put_wq, &work->work, 0);
+}
+
+static void release_fpdev(struct kobject *kobj)
+{
+	pr_debug("fpdev kobj released\n");
+}
+
+static struct kobj_type ktype_fpdev = {
+	.sysfs_ops = &fpdev_sysfs_ops,
+	.default_attrs = fpdev_default_attrs,
+	.release = release_fpdev,
+};
+
+static void fpdev_del_if_finish(struct work_struct *work)
+{
+	struct fp_net_device *fpdev;
+
+	fpdev = container_of(work, struct fp_net_device, free_work);
+
+	kobject_put(&fpdev->kobj);
+	fpdev_put(fpdev);
+}
+
+/*--------------------------------------------------------------*/
+/*-				API				-*/
+/*--------------------------------------------------------------*/
+
+/**
+ * delete the fastpath device associated with this net device
+ * 
+ * @param dev    net device
+ * 
+ * @return 0 for success, -ENODEV if not found
+ */
+int fpdev_del_if(struct net_device *dev)
+{
+	struct fp_net_device *fpdev;
+	struct fp_dev_list *fpdl = fp_device->priv;
+
+	spin_lock_bh(&fpdl->list_lock);
+	rcu_read_lock_bh();
+	list_for_each_entry_rcu(fpdev, &fpdl->devices_list, list) {
+		if (fpdev->dev == dev && fpdev_hold(fpdev))
+			goto found;
+	}
+
+	fpdev = NULL;
+
+found:
+	rcu_read_unlock_bh();
+
+	if (!fpdev) {
+		pr_debug("device (%s) not found\n", dev->name);
+		spin_unlock_bh(&fpdl->list_lock);
+		return -ENODEV;
+	}
+
+	list_del_rcu(&fpdev->list);
+	spin_unlock_bh(&fpdl->list_lock);
+
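+	/* release the reference taken in the lookup loop above */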
+	fpdev_put(fpdev);
+	schedule_work(&fpdev->free_work);
+
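+	/*
+	 * Drop this function's remaining reference; the final put happens in
+	 * fpdev_del_if_finish() once the scheduled free_work runs.
+	 */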
+	fpdev_put(fpdev);
+
+	printk(KERN_DEBUG "device (%s) found and deleted\n", dev->name);
+	return 0;
+}
+
+/**
+ * create and add a fastpath device for a given interface
+ * 
+ * @param dev    net device
+ * 
+ * @return 0 for success, error code otherwise
+ */
+int fpdev_add_if(struct net_device *dev)
+{
+	struct fp_net_device *fpdev;
+	struct fp_dev_list *fpdl;
+	int ret;
+
+	BUG_ON(!dev);
+	BUG_ON(!fp_device);
+
+	fpdl = fp_device->priv;
+
+	fpdev = kzalloc(sizeof(*fpdev), GFP_ATOMIC);
+	if (!fpdev) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dev_hold(dev);
+
+	ret = kobject_init_and_add(&fpdev->kobj, &ktype_fpdev, &fp_device->kobj,
+				   dev->name);
+	if (ret)
+		goto kobj_err;
+
+
+	fpdev->forward = true;
+	fpdev->dev = dev;
+	INIT_LIST_HEAD(&fpdev->list);
+	INIT_WORK(&fpdev->free_work, fpdev_del_if_finish);
+
+	/* extra reference for return */
+	atomic_set(&fpdev->refcnt, 2);
+	atomic_inc(&fpdl->dev_count);
+
+	spin_lock_bh(&fpdl->list_lock);
+	list_add_tail_rcu(&fpdev->list, &fpdl->devices_list);
+	spin_unlock_bh(&fpdl->list_lock);
+
+	kobject_uevent(&fpdev->kobj, KOBJ_ADD);
+
+	pr_debug("created fastpath device for %s\n", dev->name);
+
+	return 0;
+
+kobj_err:
+	kobject_put(&fpdev->kobj);
+	dev_put(dev);
+	kfree(fpdev);
+err:
+	pr_err("could not creat fastpath device for %s\n", dev->name);
+	return ret;
+}
+
+/**
+ * search for a fastpath device associated with a given net device.
+ * If found, the fastpath device's refcount is incremented.
+ * The user must call fpdev_put() when finished in order to release the device.
+ * 
+ * @param dev    net device
+ * 
+ * @return pointer to the associated fastpath device (NULL if not found)
+ */
+struct fp_net_device *fpdev_get_if(struct net_device *dev)
+{
+	struct fp_net_device *fpdev;
+	struct fp_dev_list *fpdl = fp_device->priv;
+
+	rcu_read_lock_bh();
+	list_for_each_entry_rcu(fpdev, &fpdl->devices_list, list) {
+		if (fpdev->dev == dev && atomic_inc_not_zero(&fpdev->refcnt))
+			goto found;
+	}
+
+	fpdev = NULL;
+	printk(KERN_DEBUG "device (%s) not found\n", dev->name);
+
+found:
+	rcu_read_unlock_bh();
+	return fpdev;
+}
+
+struct fp_net_device *fpdev_get_ccinet(void)
+{
+	struct fp_net_device *fpdev;
+	struct fp_dev_list *fpdl = fp_device->priv;
+
+	rcu_read_lock_bh();
+	list_for_each_entry_rcu(fpdev, &fpdl->devices_list, list) {
+		if (fpdev_is_gb6_set(fpdev) && fpdev_is_ll6_set(fpdev) &&
+			(!strncasecmp(fpdev->dev->name, "ccinet", 6)) &&
+			fpdev_is_mtu_set(fpdev) && atomic_inc_not_zero(&fpdev->refcnt))
+			goto found;
+	}
+
+	fpdev = NULL;
+
+found:
+	rcu_read_unlock_bh();
+	return fpdev;
+}
+
+/**
+ * show statistics (all fastpath devices)
+ */
+static ssize_t stats_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_net_device *itr;
+	int len, res;
+	struct fp_dev_list *fpdl = fp_device->priv;
+
+	len = sprintf(buf, "fastpath statistics\n");
+
+	spin_lock_bh(&fpdl->list_lock);
+	list_for_each_entry(itr, &fpdl->devices_list, list) {
+		if (!netif_running(itr->dev) || !fpdev_hold(itr))
+			continue;
+		res = add_stats_to_buff(buf + len, itr, PAGE_SIZE - len - 1);
+		fpdev_put(itr);
+		len += res;
+		if (res == 0) {
+			pr_info("Exceed PAGE_SIZE, result trancated\n");
+			len += sprintf(buf + len, "\n");
+			break;
+		}
+	}
+	spin_unlock_bh(&fpdl->list_lock);
+
+	return len;
+}
+
+/**
+ * clear statistics (all fastpath devices)
+ * 0 - clear fast stats only
+ * 1 - clear slow & fast stats
+ */
+static ssize_t stats_store(struct fastpath_module *m, const char *buf,
+			    size_t count)
+{
+	struct fp_net_device *itr;
+	struct net_device_stats *stats_slow;
+	struct fp_net_device_stats *stats_fast;
+	unsigned int op;
+	struct fp_dev_list *fpdl = fp_device->priv;
+
+	if (sscanf(buf, "%u", &op) != 1 || op > 1)
+		return -EINVAL;
+
+	spin_lock_bh(&fpdl->list_lock);
+	list_for_each_entry(itr, &fpdl->devices_list, list) {
+		BUG_ON(!itr->dev);
+		if (!fpdev_hold(itr))
+			continue;
+		stats_slow = &itr->dev->stats;
+		stats_fast = &itr->stats;
+		if (itr->dev->netdev_ops && itr->dev->netdev_ops->ndo_get_stats)
+			stats_slow = itr->dev->netdev_ops->ndo_get_stats(itr->dev);
+
+		memset(stats_fast, 0, sizeof(struct fp_net_device_stats));
+		if (op)
+			memset(stats_slow, 0, sizeof(struct net_device_stats));
+
+		fpdev_put(itr);
+	}
+	spin_unlock_bh(&fpdl->list_lock);
+
+	return count;
+}
+
+/**
+ * show status (all fastpath devices)
+ */
+static ssize_t status_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_net_device *itr;
+	struct fp_dev_list *fpdl = fp_device->priv;
+	int len = 0;
+
+	len = scnprintf(buf, PAGE_SIZE, "          device   state    forward   refcnt   dev_ref   bridge\n");
+
+	/* active devices */
+	rcu_read_lock_bh();
+	list_for_each_entry_rcu(itr, &fpdl->devices_list, list)
+		len += add_status_to_buff(buf + len, itr, PAGE_SIZE - len - 1);
+	rcu_read_unlock_bh();
+
+	return len;
+}
+
+static void fp_device_release(struct kobject *kobj)
+{
+	struct fp_dev_list *fpdl = to_fpmod(kobj)->priv;
+
+	/* priv is still NULL if probe failed before it was assigned */
+	BUG_ON(fpdl && !list_empty(&fpdl->devices_list));
+	pr_debug("fp_device released\n");
+}
+
+static FP_ATTR(devices, S_IRUGO, status_show, NULL);
+static FP_ATTR(stats, S_IRUGO|S_IWUSR, stats_show, stats_store);
+
+static struct attribute *fp_device_attrs[] = {
+	&fp_attr_devices.attr,
+	&fp_attr_stats.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+static struct kobj_type ktype_devices = {
+	.sysfs_ops	= &fp_sysfs_ops,
+	.default_attrs	= fp_device_attrs,
+	.release	= fp_device_release,
+};
+
+static int fp_device_probe(struct fastpath_module *module)
+{
+	int ret;
+	struct fp_dev_list *fpdl;
+
+	snprintf(module->name, sizeof(module->name),"fp_device");
+
+	fpdl = kzalloc(sizeof(*fpdl), GFP_KERNEL);
+	if (!fpdl) {
+		pr_err("fp_dev_list alloc failed\n");
+		return -ENOMEM;
+	}
+
+	kobject_init(&module->kobj, &ktype_devices);
+	ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+	if (ret < 0) {
+		pr_err("kobject_add failed (%d)\n", ret);
+		goto kobj_err;
+	}
+
+	atomic_set(&fpdl->dev_count, 0);
+	INIT_LIST_HEAD(&fpdl->devices_list);
+	spin_lock_init(&fpdl->list_lock);
+	init_waitqueue_head(&fpdl->wq);
+
+	fpdl->dev_put_wq = create_singlethread_workqueue(module->name);
+	if (!fpdl->dev_put_wq) {
+		pr_err("create workqueue failed\n");
+		ret = -EBUSY;
+		goto kobj_err;
+	}
+
+	module->priv = fpdl;
+	fp_device = module;
+
+	kobject_uevent(&module->kobj, KOBJ_ADD);
+
+	pr_debug("fp_device probed\n");
+	return 0;
+
+kobj_err:
+	kobject_put(&module->kobj);
+	kfree(fpdl);
+	return ret;
+}
+
+static int fp_device_remove(struct fastpath_module *module)
+{
+	struct fp_dev_list *fpdl = fp_device->priv;
+
+	BUG_ON(!module);
+
+	flush_workqueue(fpdl->dev_put_wq);
+	wait_event(fpdl->wq, !atomic_read(&fpdl->dev_count));
+	destroy_workqueue(fpdl->dev_put_wq);
+
+	kobject_put(&module->kobj);
+	fp_device = NULL;
+
+	kfree(module->priv);
+	kfree(module);
+
+	pr_debug("fp_device removed\n");
+	return 0;
+}
+
+struct fastpath_module_ops fp_device_ops = {
+	.probe = fp_device_probe,
+	.remove = fp_device_remove,
+};
diff --git a/package/kernel/mfp/files/fp_device.h b/package/kernel/mfp/files/fp_device.h
new file mode 100644
index 0000000..adef8ac
--- /dev/null
+++ b/package/kernel/mfp/files/fp_device.h
@@ -0,0 +1,123 @@
+#ifndef __FP_DEVICE_H__
+#define __FP_DEVICE_H__
+
+#define FP_DEV_MASK_GB6_SET		0
+#define FP_DEV_MASK_LL6_SET		1
+#define FP_DEV_MASK_MTU_SET		2
+
+struct fp_dev_list {
+	struct list_head devices_list;
+	spinlock_t list_lock;
+	wait_queue_head_t wq;
+	atomic_t dev_count;
+	struct workqueue_struct *dev_put_wq;
+};
+
+struct fp_dev_work {
+	struct list_head list;
+	struct fp_dev_list *fpdl;
+	struct delayed_work work;
+	struct fp_net_device *fpdev;
+};
+
+struct fp_net_device_stats {
+	unsigned long	rx_packets;
+	unsigned long	tx_packets;
+	unsigned long	rx_bytes;
+	unsigned long	tx_bytes;
+	unsigned long	rx_errors;
+	unsigned long	tx_errors;
+	unsigned long	rx_dropped;
+	unsigned long	tx_dropped;
+	unsigned long	queue_stopped;
+};
+
+struct fp_net_device {
+	struct kobject kobj;
+	struct rcu_head rcu;
+	struct list_head list;
+	struct work_struct free_work;
+	struct net_device *dev; /* associated net_device */
+	struct net_device *br; /* bridge to which this device is attached */
+	atomic_t refcnt;
+	unsigned long flags;
+	bool forward;	/* fastpath forwarding enabled/disabled */
+	struct fp_net_device_stats stats;
+	struct in6_addr ll6addr;
+	struct in6_addr gb6addr;
+	u8 prefixlen;
+	u32 mtu;
+};
+
+int fpdev_add_if(struct net_device *dev);
+int fpdev_del_if(struct net_device *dev);
+struct fp_net_device *fpdev_get_if(struct net_device *dev);
+struct fp_net_device *fpdev_get_ccinet(void);
+void destroy_fpdev_rcu(struct rcu_head *rcu);
+
+static inline int fpdev_cmp_if(struct fp_net_device *fdev, struct net_device *dev)
+{
+	return (fdev->dev == dev);
+}
+
+/* increment reference to a fastpath device */
+static inline struct fp_net_device *fpdev_hold(struct fp_net_device *fpdev)
+{
+	if (fpdev && !atomic_inc_not_zero(&fpdev->refcnt))
+		return NULL;
+	return fpdev;
+}
+
+/* decrement reference to a fastpath device */
+static inline void fpdev_put(struct fp_net_device *fpdev)
+{
+	if (fpdev && atomic_dec_and_test(&fpdev->refcnt))
+		call_rcu(&fpdev->rcu, destroy_fpdev_rcu);
+}
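+
+/*
+ * Typical lookup pattern (a sketch; "dev" is any net_device previously
+ * registered with fpdev_add_if()):
+ *
+ *	struct fp_net_device *fpdev = fpdev_get_if(dev);
+ *
+ *	if (fpdev) {
+ *		// ... use fpdev->dev, fpdev->stats, fpdev->forward ...
+ *		fpdev_put(fpdev);
+ *	}
+ */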
+
+static inline void fpdev_set_gb6(struct fp_net_device *fpdev)
+{
+	set_bit(FP_DEV_MASK_GB6_SET, &fpdev->flags);
+}
+
+static inline int fpdev_is_gb6_set(struct fp_net_device *fpdev)
+{
+	return test_bit(FP_DEV_MASK_GB6_SET, &fpdev->flags);
+}
+
+static inline void fpdev_clear_gb6(struct fp_net_device *fpdev)
+{
+	clear_bit(FP_DEV_MASK_GB6_SET, &fpdev->flags);
+}
+
+static inline void fpdev_set_ll6(struct fp_net_device *fpdev)
+{
+	set_bit(FP_DEV_MASK_LL6_SET, &fpdev->flags);
+}
+
+static inline int fpdev_is_ll6_set(struct fp_net_device *fpdev)
+{
+	return test_bit(FP_DEV_MASK_LL6_SET, &fpdev->flags);
+}
+
+static inline void fpdev_clear_mtu(struct fp_net_device *fpdev)
+{
+	clear_bit(FP_DEV_MASK_MTU_SET, &fpdev->flags);
+}
+
+static inline void fpdev_set_mtu(struct fp_net_device *fpdev)
+{
+	set_bit(FP_DEV_MASK_MTU_SET, &fpdev->flags);
+}
+
+static inline int fpdev_is_mtu_set(struct fp_net_device *fpdev)
+{
+	return test_bit(FP_DEV_MASK_MTU_SET, &fpdev->flags);
+}
+
+static inline void fpdev_clear_ll6(struct fp_net_device *fpdev)
+{
+	clear_bit(FP_DEV_MASK_LL6_SET, &fpdev->flags);
+}
+
+#endif
diff --git a/package/kernel/mfp/files/fp_forward.c b/package/kernel/mfp/files/fp_forward.c
new file mode 100644
index 0000000..a130630
--- /dev/null
+++ b/package/kernel/mfp/files/fp_forward.c
@@ -0,0 +1,1086 @@
+/*
+ *	Fast path Forward
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "mfp" " forward: %s:%d: " fmt, __func__, __LINE__
+
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <linux/relay.h>
+#include <linux/debugfs.h>
+#include <linux/skbrb.h>
+
+#include "fp_common.h"
+#include "fp_classifier.h"
+#include "fp_database.h"
+#include "fp_device.h"
+#include "fp_core.h"
+#include "fp_ndisc.h"
+
+/**
+ * For netif_rx called in interrupt or irq_disabled, direct hook can't use.
+ * So enable netfilter hook for this situation.
+ */
+//#define FP_RX_IN_INTR_TO_NETFILTER
+
+/* function prototypes */
+static int fp_forward_direct(struct sk_buff *skb);
+static int fp_forward_queue(struct sk_buff *skb);
+static unsigned int fp_forward_nf_hook(void *priv,
+			       struct sk_buff *skb,
+			       const struct nf_hook_state *state);
+
+static int fp_forward_output(struct sk_buff *skb);
+static int fp_forward_netif_rx(struct sk_buff *skb);
+
+#ifdef CONFIG_ASR_TOE
+extern int fp_cm_genl_send_tuple(struct nf_conntrack_tuple *tuple, struct fpdb_entry *el,
+									 int add, int len);
+#endif
+
+struct rx_hook_struct {
+	const char *name;
+	int (*connect)(void);
+	void (*disconnect)(void);
+};
+
+struct tx_hook_struct {
+	const char *name;
+	int (*output)(struct sk_buff *);
+};
+
+struct fp_forward {
+	spinlock_t lock;
+	struct rx_hook_struct *rx_hook;
+	struct tx_hook_struct *tx_hook;
+};
+
+static int (*output)(struct sk_buff *); /* global output function pointer */
+static int drop_on_busy = 1; /* drop packets if output dev is busy */
+static int bypass_fastpath = 0;
+static int reply_ra;
+static unsigned int pkt_debug_level;
+static struct rchan *fp_chan;
+static struct dentry *fp_dir;
+
+/*--------------------------------------*/
+/*-------------- RX HOOKS --------------*/
+/*--------------------------------------*/
+
+/** netif_rx hook */
+static int netif_rx_hook_connect(void)
+{
+	netif_rx_fastpath_register(&fp_forward_netif_rx);
+	return 0;
+}
+static void netif_rx_hook_disconnect(void)
+{
+	netif_rx_fastpath_unregister();
+}
+
+/** netfilter rx_hook */
+static struct nf_hook_ops nf_rx_hook_data[] __read_mostly = {
+	{
+		.hook = fp_forward_nf_hook,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP_PRI_FIRST,
+	},
+	{
+		.hook = fp_forward_nf_hook,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_PRE_ROUTING,
+		.priority = NF_IP6_PRI_FIRST,
+	},
+};
+
+static int nf_rx_hook_connect(void)
+{
+	return nf_register_net_hooks(&init_net, nf_rx_hook_data, ARRAY_SIZE(nf_rx_hook_data));
+}
+
+static void nf_rx_hook_disconnect(void)
+{
+	nf_unregister_net_hooks(&init_net, nf_rx_hook_data, ARRAY_SIZE(nf_rx_hook_data));
+}
+
+#define RX_HOOK_NETIF		(0)
+#define RX_HOOK_NETFILTER	(1)
+#define RX_HOOK_NONE		(2)
+
+static struct rx_hook_struct rx_hooks[] = {
+	[RX_HOOK_NETIF].name		= "direct (netif_rx)",
+	[RX_HOOK_NETIF].connect		= &netif_rx_hook_connect,
+	[RX_HOOK_NETIF].disconnect	= &netif_rx_hook_disconnect,
+	[RX_HOOK_NETFILTER].name	= "netfilter (NF_INET_PRE_ROUTING)",
+	[RX_HOOK_NETFILTER].connect	= &nf_rx_hook_connect,
+	[RX_HOOK_NETFILTER].disconnect	= &nf_rx_hook_disconnect,
+	[RX_HOOK_NONE].name		= "disconnected",
+};
+
+/*--------------------------------------*/
+/*-------------- TX HOOKS --------------*/
+/*--------------------------------------*/
+
+#define TX_HOOK_NDO_START_XMIT		(0)
+#define TX_HOOK_DEV_QUEUE_XMIT		(1)
+#define TX_HOOK_NONE			(2)
+
+static struct tx_hook_struct tx_hooks[] = {
+	[TX_HOOK_NDO_START_XMIT].name	= "direct (ndo_start_xmit)",
+	[TX_HOOK_NDO_START_XMIT].output	= &fp_forward_direct,
+	[TX_HOOK_DEV_QUEUE_XMIT].name	= "queue (dev_queue_xmit)",
+	[TX_HOOK_DEV_QUEUE_XMIT].output	= &fp_forward_queue,
+	[TX_HOOK_NONE].name		= "disconnected"
+};
+
+static unsigned int fp_forward_rx_hook = FP_FORWARD_RX_HOOK_DEFAULT;
+static unsigned int fp_forward_tx_hook = FP_FORWARD_TX_HOOK_DEFAULT;
+
+static void fp_print_pkt(char *buf, u32 buf_len)
+{
+	if (!fp_chan)
+		return;
+
+	relay_write(fp_chan, buf, buf_len);
+}
+
+static void fp_dump_input_pkt(struct sk_buff *skb, char *rx_tx)
+{
+	struct iphdr *iph = (struct iphdr *)(skb->data);
+	u8 version = iph->version;
+	char buf[512] = {0};
+	u32 len = 0;
+	u64 ts_nsec;
+	unsigned long rem_nsec;
+
+	ts_nsec = local_clock();
+	rem_nsec = do_div(ts_nsec, 1000000000);
+
+	if (version == 4) {
+		if (iph->protocol == IPPROTO_ICMP) {
+			struct icmphdr *icmph = (struct icmphdr *)(iph + 1);
+			int type = icmph->type;
+			if (type == 8) {
+				len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive icmp request, src=%pI4 dst=%pI4 ID=%u SEQ=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(icmph->un.echo.id),
+							ntohs(icmph->un.echo.sequence));
+			} else if (type == 0) {
+				len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive icmp reply, src=%pI4 dst=%pI4 ID=%u SEQ=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(icmph->un.echo.id),
+							ntohs(icmph->un.echo.sequence));
+			}
+		} else if (iph->protocol == IPPROTO_UDP) {
+			struct udphdr *uh = (struct udphdr *)(iph + 1);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive UDP, src=%pI4 dst=%pI4 ID=%u sp=%u dp=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(iph->id), ntohs(uh->source), ntohs(uh->dest));
+		} else if (iph->protocol == IPPROTO_TCP) {
+			struct tcphdr *th = (struct tcphdr *)(iph + 1);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive TCP, src=%pI4 dst=%pI4 ID=%u sp=%u dp=%u seq=%u ack=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(iph->id), ntohs(th->source), ntohs(th->dest),
+							ntohl(th->seq), ntohl(th->ack_seq));
+		} else {
+			len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive pkt type %u, src=%pI4 dst=%pI4 ID=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, iph->protocol, &iph->saddr, &iph->daddr, ntohs(iph->id));
+		}
+	} else if (version == 6) {
+		struct ipv6hdr *ip6h;
+		__be16 frag_off;
+		int offset;
+		u8 nexthdr;
+
+		ip6h = (struct ipv6hdr *)(skb->data);
+		nexthdr = ip6h->nexthdr;
+		/* not support fragment pkt */
+		if (nexthdr == NEXTHDR_FRAGMENT)
+			return;
+		if (ipv6_ext_hdr(nexthdr)) {
+			offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
+			if (offset < 0)
+				return;
+		} else
+			offset = sizeof(struct ipv6hdr);
+
+		if (nexthdr == IPPROTO_ICMPV6) {
+			struct icmp6hdr *icmp6;
+			if (!pskb_may_pull(skb, ((unsigned char*)ip6h + offset + 6 - skb->data)))
+				return;
+			icmp6 = (struct icmp6hdr *)((unsigned char*)ip6h + offset);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive icmp6, src=%pI6c dst=%pI6c type=%u code=%u id=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &ip6h->saddr, &ip6h->daddr, icmp6->icmp6_type, icmp6->icmp6_code,
+							ntohs(icmp6->icmp6_identifier));
+		} else if (nexthdr == IPPROTO_UDP) {
+			struct udphdr *uh6 = (struct udphdr *)((unsigned char*)ip6h + offset);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive UDP6, src=%pI6c dst=%pI6c sp=%u dp=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &ip6h->saddr, &ip6h->daddr, ntohs(uh6->source), ntohs(uh6->dest));
+		} else if (nexthdr == IPPROTO_TCP) {
+			struct tcphdr *th6 = (struct tcphdr *)((unsigned char*)ip6h + offset);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive TCP6, src=%pI6c dst=%pI6c sp=%u dp=%u seq=%u ack=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &ip6h->saddr, &ip6h->daddr, ntohs(th6->source), ntohs(th6->dest),
+							ntohl(th6->seq), ntohl(th6->ack_seq));
+		} else {
+			len = scnprintf(buf, 512, "[%5lu.%06lu] --->>%s, receive pkt type %u, src=%pI6c dst=%pI6c\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, nexthdr, &ip6h->saddr, &ip6h->daddr);
+		}
+	} else {
+		return;
+	}
+
+	fp_print_pkt(buf, len);
+}
+
+static void fp_dump_output_pkt(struct sk_buff *skb, char *rx_tx)
+{
+	struct iphdr *iph;
+	u8 version;
+	char buf[512] = {0};
+	u32 len = 0;
+	u64 ts_nsec;
+	unsigned long rem_nsec;
+
+	ts_nsec = local_clock();
+	rem_nsec = do_div(ts_nsec, 1000000000);
+
+	iph = (struct iphdr *)(skb->data);
+
+	version = iph->version;
+	if (version == 4) {
+		if (iph->protocol == IPPROTO_ICMP) {
+			struct icmphdr *icmph = (struct icmphdr *)(iph + 1);
+			int type = icmph->type;
+			if (type == 8) {
+				len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send icmp request, src=%pI4 dst=%pI4 ID=%u SEQ=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(icmph->un.echo.id),
+							ntohs(icmph->un.echo.sequence));
+			} else if (type == 0) {
+				len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send icmp reply, src=%pI4 dst=%pI4 ID=%u SEQ=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(icmph->un.echo.id),
+							ntohs(icmph->un.echo.sequence));
+			}
+		} else if (iph->protocol == IPPROTO_UDP) {
+			struct udphdr *uh = (struct udphdr *)(iph + 1);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send UDP, src=%pI4 dst=%pI4 ID=%u sp=%u dp=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(iph->id), ntohs(uh->source), ntohs(uh->dest));
+		} else if (iph->protocol == IPPROTO_TCP) {
+			struct tcphdr *th = (struct tcphdr *)(iph + 1);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send TCP, src=%pI4 dst=%pI4 ID=%u sp=%u dp=%u seq=%u ack=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &iph->saddr, &iph->daddr, ntohs(iph->id), ntohs(th->source), ntohs(th->dest),
+							ntohl(th->seq), ntohl(th->ack_seq));
+		} else {
+			len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send pkt type %u, src=%pI4 dst=%pI4 ID=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, iph->protocol, &iph->saddr, &iph->daddr, ntohs(iph->id));
+		}
+	} else if (version == 6) {
+		struct ipv6hdr *ip6h;
+		__be16 frag_off;
+		int offset;
+		u8 nexthdr;
+
+		ip6h = (struct ipv6hdr *)(skb->data);
+
+		nexthdr = ip6h->nexthdr;
+		/* not support fragment pkt */
+		if (nexthdr == NEXTHDR_FRAGMENT)
+			return;
+		if (ipv6_ext_hdr(nexthdr)) {
+			offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
+			if (offset < 0)
+				return;
+		} else
+			offset = sizeof(struct ipv6hdr);
+
+		if (nexthdr == IPPROTO_ICMPV6) {
+			struct icmp6hdr *icmp6;
+			if (!pskb_may_pull(skb, ((unsigned char*)ip6h + offset + 6 - skb->data)))
+				return;
+			icmp6 = (struct icmp6hdr *)((unsigned char*)ip6h + offset);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send icmp6, src=%pI6c dst=%pI6c type=%u code=%u id=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &ip6h->saddr, &ip6h->daddr, icmp6->icmp6_type, icmp6->icmp6_code,
+							ntohs(icmp6->icmp6_identifier));
+		} else if (nexthdr == IPPROTO_UDP) {
+			struct udphdr *uh6 = (struct udphdr *)((unsigned char*)ip6h + offset);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send UDP6, src=%pI6c dst=%pI6c sp=%u dp=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &ip6h->saddr, &ip6h->daddr, ntohs(uh6->source), ntohs(uh6->dest));
+		} else if (nexthdr == IPPROTO_TCP) {
+			struct tcphdr *th6 = (struct tcphdr *)((unsigned char*)ip6h + offset);
+			len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send TCP6, src=%pI6c dst=%pI6c sp=%u dp=%u seq=%u ack=%u\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, &ip6h->saddr, &ip6h->daddr, ntohs(th6->source), ntohs(th6->dest),
+							ntohl(th6->seq), ntohl(th6->ack_seq));
+		} else {
+			len = scnprintf(buf, 512, "[%5lu.%06lu] <<---%s, send pkt type %u, src=%pI6c dst=%pI6c\n",
+							(unsigned long)ts_nsec, rem_nsec / 1000,
+							rx_tx, nexthdr, &ip6h->saddr, &ip6h->daddr);
+		}
+	} else {
+		return;
+	}
+
+	fp_print_pkt(buf, len);
+}
+/**
+ * Forward an skb directly to the output interface if classified as
+ * fastpath. skb->dev must point to the src net_device (done in
+ * eth_type_trans or in drivers)
+ *
+ * @todo 1. Add an option to enable/disable fastpath for a
+ *       specific net_device from userspace (via
+ *       ifconfig/ethtool)
+ * @note If the source net_device doesn't have fastpath enabled,
+ *       a packet from it can still traverse through fastpath if
+ *       the output net_device supports it and there was a match
+ *       in the fastpath database.
+ * @param skb    skb to forward through fastpath
+ *
+ * @return 1 if skb consumed by fastpath, 0 otherwise (should be
+ *         sent through slowpath)
+ */
+static int fp_forward_direct(struct sk_buff *skb)
+{
+	int ret, len = skb->len; /* default is slowpath */
+	struct fp_net_device *dst, *src;
+	struct fpdb_entry *el;
+	struct netdev_queue *txq;
+	struct sk_buff *skb2 = skb;
+	struct nf_conntrack_tuple tuple;
+	const struct net_device_ops *ops;
+
+	/*
+	 * fastpath direct tx hook should be used only when no packets can
+	 * arrive in irq/irq disable context, since fastpath only protects
+	 * at soft-irq level. Otherwise this could possibly result in a deadlock.
+	*/
+
+	WARN_ONCE(in_irq() || irqs_disabled(),
+		  "fastpath direct tx called from irq, or irq disabled!\n");
+
+	el = fpc_classify_start(skb, &tuple);
+	if (unlikely(!el))
+		goto slowpath;
+	rcu_read_lock_bh();
+
+	src = rcu_dereference_bh(el->in_dev);
+	dst = rcu_dereference_bh(el->out_dev);
+
+	if (pkt_debug_level == 2 || pkt_debug_level == 3) {
+		if (!strncasecmp(dst->dev->name, "ccinet", 6))
+			fp_dump_output_pkt(skb, "F_UL");
+		else
+			fp_dump_output_pkt(skb, "F_DL");
+	}
+
+	ops = dst->dev->netdev_ops;
+	if (fpc_classify_finish(skb, el)) {
+		rcu_read_unlock_bh();
+		goto slowpath;
+	}
+
+	skb_reset_mac_header(skb);
+	txq = netdev_core_pick_tx(dst->dev, skb, NULL);
+	HARD_TX_LOCK(dst->dev, txq, smp_processor_id());
+	if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
+		skb2 = NULL;
+		dst->stats.queue_stopped++;
+	}
+
+	if (skb2)
+		skb2->dev = dst->dev;
+	ret = skb2 ? ops->ndo_start_xmit(skb2, dst->dev) : NETDEV_TX_BUSY;
+
+	switch (ret) {
+	case NETDEV_TX_OK:
+		/* sent through fastpath */
+		txq_trans_update(txq);
+		src->stats.rx_packets++;
+		src->stats.rx_bytes += len;
+		dst->stats.tx_packets++;
+		if (dst->dev->header_ops)
+			dst->stats.tx_bytes += len + ETH_HLEN;
+		else
+			dst->stats.tx_bytes += len;
+		break;
+	case NET_XMIT_CN:
+		src->stats.rx_dropped++;
+		dst->stats.tx_dropped++;
+		break;
+	case NET_XMIT_DROP:
+	case NETDEV_TX_BUSY:
+	default:
+		if (unlikely(skb2)) {
+			/* shouldn't happen since we check txq before trying to transmit */
+			src->stats.rx_errors++;
+			dst->stats.tx_errors++;
+			printk(KERN_DEBUG "Failed to send through fastpath (ret=%d)\n", ret);
+		}
+
+		if (drop_on_busy) {
+			src->stats.rx_dropped++;
+			dst->stats.tx_dropped++;
+			dev_kfree_skb_any(skb);
+		}
+	}
+
+	HARD_TX_UNLOCK(dst->dev, txq);
+
+#ifdef CONFIG_ASR_TOE
+	if ((0 == el->nl_flag) && (ret == NETDEV_TX_OK)) {
+		//fpdb_dump_entry("fp_cm_genl_send_tuple, entry dump:\n", el);
+		fp_cm_genl_send_tuple(&tuple, el, 1, len);
+	}
+#endif
+	rcu_read_unlock_bh();
+
+	if (likely(skb2) || drop_on_busy)
+		return 1;
+slowpath:
+	if (pkt_debug_level == 2 || pkt_debug_level == 3) {
+		if (!strncasecmp(skb->dev->name, "ccinet", 6))
+			fp_dump_output_pkt(skb, "S_DL");
+		else
+			fp_dump_output_pkt(skb, "S_UL");
+	}
+
+	/* DO NOT do skb copy if the skb is allocated from skbrb
+	 * (skb ring buffer for bridge performance)
+	 */
+	if (!IS_SKBRB_SKB(skb) && FP_IS_SKB_P(skb))
+		pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+	return 0;
+}
+
+/**
+ * Forward an skb to the output device's queue if classified as fastpath.
+ * 
+ * @param skb    skb to forward
+ * 
+ * @return 1 if consumed by fastpath, 0 otherwise (should be sent through slowpath)
+ */
+static int fp_forward_queue(struct sk_buff *skb)
+{
+	int ret, len = skb->len;
+	struct fp_net_device *dst, *src;
+	struct fpdb_entry *el;
+	struct nf_conntrack_tuple tuple;
+
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+			skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+		skb = skb_vlan_untag(skb);
+		if (unlikely(!skb))
+			return 0;
+		if (skb_vlan_tag_present(skb)) {
+			if (!vlan_do_receive(&skb)) {
+				if (unlikely(!skb))
+					return 1;
+			}
+		}
+	}
+
+	el = fpc_classify_start(skb, &tuple);
+	if (unlikely(!el)) {
+		/* DO NOT do skb copy if the skb is allocated from skbrb
+		 * (skb ring buffer for bridge performace)
+		 */
+		if (!IS_SKBRB_SKB(skb) && FP_IS_SKB_P(skb))
+			pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		return 0;
+	}
+
+#ifdef CONFIG_ASR_TOE
+	if (0 == el->nl_flag) {
+		//fpdb_dump_entry("fp_cm_genl_send_tuple, entry dump:\n", el);
+		fp_cm_genl_send_tuple(&tuple, el, 1, len);
+	}
+#endif
+
+	src = fpdev_hold(el->in_dev);
+	dst = fpdev_hold(el->out_dev);
+	if (fpc_classify_finish(skb, el)) {
+		/* don't leak the device references taken just above */
+		fpdev_put(dst);
+		fpdev_put(src);
+		return 0;
+	}
+
+	skb->dev = dst->dev;
+
+	ret = dev_queue_xmit(skb);
+	switch (ret) {
+	case NET_XMIT_SUCCESS:
+		src->stats.rx_bytes += len;
+		src->stats.rx_packets++;
+		dst->stats.tx_bytes += len + ETH_HLEN;
+		dst->stats.tx_packets++;
+		break;
+	case NET_XMIT_CN:
+		src->stats.rx_dropped++;
+		dst->stats.tx_dropped++;
+		dst->stats.queue_stopped++;
+		break;
+	case NET_XMIT_DROP:
+	default:
+		pr_info("unexpected return code from dev_queue_xmit (%d)\n", ret);
+		src->stats.rx_errors++;
+		dst->stats.tx_errors++;
+	}
+
+	fpdev_put(dst);
+	fpdev_put(src);
+
+	return 1;
+}
+
+static unsigned int fp_forward_nf_hook(void *priv, struct sk_buff *skb,
+	   const struct nf_hook_state *state)
+{
+	WARN_ON_ONCE(irqs_disabled());
+
+	if (fp_forward_output(skb))
+		return NF_STOLEN;
+
+	return NF_ACCEPT;
+}
+
+static int fp_forward_netif_rx(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h;
+	u32 len;
+	int ret;
+
+	if (unlikely(bypass_fastpath == 1))
+		goto slowpath;
+
+#ifdef FP_RX_IN_INTR_TO_NETFILTER
+	if (in_irq() || irqs_disabled())
+		goto slowpath;
+#endif
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+			skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+		skb = skb_vlan_untag(skb);
+		if (unlikely(!skb))
+			goto slowpath;
+		if (skb_vlan_tag_present(skb)) {
+			if (!vlan_do_receive(&skb)) {
+				if (unlikely(!skb))
+					return 1;
+			}
+		}
+	}
+
+	if (pkt_debug_level == 1 || pkt_debug_level == 3) {
+		if (!strncasecmp(skb->dev->name, "ccinet", 6))
+			fp_dump_input_pkt(skb, "DL");
+		else
+			fp_dump_input_pkt(skb, "UL");
+	}
+
+	iph = (struct iphdr *)skb->data;
+
+	if (likely(iph->version == 4)) {
+
+		if (iph->ihl < 5)
+			goto slowpath_warn;
+
+		len = ntohs(iph->tot_len);
+
+		if (skb->len < len || len < (iph->ihl * 4))
+			goto slowpath_warn;
+
+	} else if (likely(iph->version == 6)) {
+
+		ipv6h = (struct ipv6hdr *)skb->data;
+
+		len = ntohs(ipv6h->payload_len);
+
+		if (!len && ipv6h->nexthdr == NEXTHDR_HOP)
+			goto done;
+
+		if (len + sizeof(struct ipv6hdr) > skb->len)
+			goto slowpath_warn;
+
+		len = len + sizeof(struct ipv6hdr);
+	} else {
+		goto slowpath;
+	}
+
+	/* trim possible padding on skb*/
+	if (pskb_trim_rcsum(skb, len))
+		goto slowpath_warn;
+
+done:
+	ret = fp_forward_output(skb);
+	if (!ret) {
+		if (reply_ra && fpnd_is_rs(skb)) {
+			struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
+			printk(KERN_DEBUG "received RS on dev (%s), saddr=%pI6c, daddr=%pI6c\n",
+				skb->dev->name, &ipv6h->saddr, &ipv6h->daddr);
+			return fpnd_process_rs(skb);
+		}
+
+		if (fpnd_is_ra(skb)) {
+			struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
+			printk(KERN_DEBUG "received RA on dev (%s), saddr=%pI6c, daddr=%pI6c\n",
+				skb->dev->name, &ipv6h->saddr, &ipv6h->daddr);
+			fpnd_process_ra(skb->dev, skb);
+		}
+	}
+	return ret;
+slowpath_warn:
+	pr_debug_ratelimited("bad ip header received\n");
+slowpath:
+	return 0;
+}
+
+static inline int fp_forward_output(struct sk_buff *skb)
+{
+	if (unlikely(bypass_fastpath == 1))
+		return 0;
+
+	if (output)
+		return output(skb);
+	return 0;
+}
+
+static inline void tx_hook_disconnect(struct fp_forward *priv)
+{
+	BUG_ON(!priv);
+	priv->tx_hook = &tx_hooks[TX_HOOK_NONE];
+	output = priv->tx_hook->output;
+}
+
+static inline void tx_hook_connect(struct fp_forward *priv)
+{
+	BUG_ON(!priv || !priv->tx_hook || !priv->tx_hook->output);
+	output = priv->tx_hook->output;
+}
+
+static inline void rx_hook_disconnect(struct fp_forward *priv)
+{
+	BUG_ON(!priv);
+
+	if (priv->rx_hook->disconnect)
+		priv->rx_hook->disconnect();
+	priv->rx_hook = &rx_hooks[RX_HOOK_NONE];
+}
+
+static inline int rx_hook_connect(struct fp_forward *priv)
+{
+	int ret;
+
+	BUG_ON(!priv || !priv->rx_hook || !priv->rx_hook->connect);
+
+	ret = priv->rx_hook->connect();
+	if (ret < 0) {
+		pr_err("rx_hook connect failed (%d)\n", ret);
+		priv->rx_hook = &rx_hooks[RX_HOOK_NONE];
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline void fp_forward_disconnect(struct fp_forward *priv)
+{
+	tx_hook_disconnect(priv);
+	rx_hook_disconnect(priv);
+#ifdef FP_RX_IN_INTR_TO_NETFILTER
+	if (priv->rx_hook != &rx_hooks[RX_HOOK_NETFILTER])
+		nf_rx_hook_disconnect();
+#endif
+}
+
+static inline int fp_forward_connect(struct fp_forward *priv)
+{
+	int ret;
+
+	tx_hook_connect(priv);
+	ret = rx_hook_connect(priv);
+	if (ret < 0) {
+		pr_err("rx_hook connect failed (%d)\n", ret);
+		tx_hook_disconnect(priv);
+		return ret;
+	}
+
+#ifdef FP_RX_IN_INTR_TO_NETFILTER
+	if (priv->rx_hook != &rx_hooks[RX_HOOK_NETFILTER]) {
+		ret = nf_rx_hook_connect();
+		if (ret < 0) {
+			pr_err("netfilter rx_hook connect failed (%d)\n", ret);
+			return ret;
+		}
+		//pr_info("=== mfp: also enable netfilter hook for RX\n");
+	}
+#endif
+
+	return 0;
+}
+
+static ssize_t rx_hook_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_forward *priv = m->priv;
+	int i, len = sprintf(buf, "fastpath forward rx hooks:\n");
+	char c;
+
+	for (i = 0; i < ARRAY_SIZE(rx_hooks); i++) {
+		c = (priv->rx_hook == &rx_hooks[i]) ? '*' : ' ';
+		len += sprintf(buf+len, "%c %s\n", c, rx_hooks[i].name);
+	}
+
+	return len;
+}
+
+
+static ssize_t rx_hook_store(struct fastpath_module *m, const char *buf,
+			      size_t count)
+{
+	struct fp_forward *priv = m->priv;
+	struct rx_hook_struct *rx_hook;
+	unsigned int idx;
+	int ret;
+
+	sscanf(buf, "%u", &idx);
+
+	if (idx > ARRAY_SIZE(rx_hooks) - 1) {
+		pr_debug("Invalid rx hook=%d\n", idx);
+		return -EINVAL;
+	}
+
+	rx_hook = &rx_hooks[idx];
+	if (rx_hook == priv->rx_hook)
+		return count; /* no change */
+
+#ifdef FP_RX_IN_INTR_TO_NETFILTER
+	if (priv->rx_hook != &rx_hooks[RX_HOOK_NETFILTER])
+		rx_hook_disconnect(priv);
+#else
+	rx_hook_disconnect(priv);
+#endif
+	priv->rx_hook = rx_hook;
+
+#ifdef FP_RX_IN_INTR_TO_NETFILTER
+	if (rx_hook == &rx_hooks[RX_HOOK_NETFILTER])
+		return count;
+#endif
+	ret = rx_hook_connect(priv);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static ssize_t tx_hook_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_forward *priv = m->priv;
+	int i, len = sprintf(buf, "fastpath forward tx hooks:\n");
+	char c;
+
+	for (i = 0; i < ARRAY_SIZE(tx_hooks); i++) {
+		c = (priv->tx_hook == &tx_hooks[i]) ? '*' : ' ';
+		len += sprintf(buf+len, "%c %s\n", c, tx_hooks[i].name);
+	}
+
+	return len;
+}
+
+
+static ssize_t tx_hook_store(struct fastpath_module *m, const char *buf,
+			      size_t count)
+{
+	struct fp_forward *priv = m->priv;
+	struct tx_hook_struct *tx_hook;
+	unsigned int idx;
+
+	sscanf(buf, "%u", &idx);
+
+	if (idx > ARRAY_SIZE(tx_hooks) - 1) {
+		pr_debug("Invalid tx hook=%d\n", idx);
+		return -EINVAL;
+	}
+
+	tx_hook = &tx_hooks[idx];
+
+	if (tx_hook == priv->tx_hook)
+		return count; /* no change */
+
+	tx_hook_disconnect(priv);
+	priv->tx_hook = tx_hook;
+	tx_hook_connect(priv);
+
+	return count;
+}
+
+static ssize_t dob_show(struct fastpath_module *m, char *buf)
+{
+	return sprintf(buf, "fastpath forward drop on busy: %d\n", drop_on_busy);
+}
+
+
+static ssize_t dob_store(struct fastpath_module *m, const char *buf,
+			      size_t count)
+{
+	unsigned int dob;
+
+	sscanf(buf, "%u", &dob);
+
+	if (dob != 0 && dob != 1) {
+		pr_debug("Invalid value %d - should be 1/0 \n", dob);
+		return -EINVAL;
+	}
+
+	drop_on_busy = dob;
+
+	return count;
+}
+
+static ssize_t bypass_show(struct fastpath_module *m, char *buf)
+{
+	return sprintf(buf, "fastpath bypass flag: %d\n", bypass_fastpath);
+}
+
+
+static ssize_t bypass_store(struct fastpath_module *m, const char *buf,
+			      size_t count)
+{
+	unsigned int bypass_fastpath_flag;
+
+	sscanf(buf, "%u", &bypass_fastpath_flag);
+
+	if (bypass_fastpath_flag != 0 && bypass_fastpath_flag != 1) {
+		pr_debug("bypass_store: Invalid value %d - should be 1/0 \n",
+			bypass_fastpath_flag);
+		return -EINVAL;
+	}
+	bypass_fastpath = bypass_fastpath_flag;
+	return count;
+}
+
+
+static void fp_forward_release(struct kobject *kobj)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+	struct fp_forward *priv = module->priv;
+
+	fp_forward_disconnect(priv);
+
+	pr_debug("fp_forward released\n");
+	kfree(priv);
+	kfree(module);
+}
+
+static ssize_t reply_ra_show(struct fastpath_module *m, char *buf)
+{
+	return sprintf(buf, "fastpath reply_ra flag: %d\n", reply_ra);
+}
+
+static ssize_t reply_ra_store(struct fastpath_module *m, const char *buf,
+		size_t count)
+{
+	unsigned int reply_ra_flag;
+
+	sscanf(buf, "%u", &reply_ra_flag);
+
+	if (reply_ra_flag != 0 && reply_ra_flag != 1) {
+		pr_debug("reply_ra_store: Invalid value %d - should be 1/0 \n",
+		  reply_ra_flag);
+		return -EINVAL;
+	}
+	reply_ra = reply_ra_flag;
+	return count;
+}
+
+static struct dentry *create_buf_file_handler(const char *filename,
+						 struct dentry *parent,
+						 umode_t mode,
+						 struct rchan_buf *buf,
+						 int *is_global)
+{
+	struct dentry *buf_file;
+
+	buf_file = debugfs_create_file(filename, mode|S_IRUGO|S_IWUSR, parent, buf,
+					  &relay_file_operations);
+	*is_global = 1;
+	return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+	return 0;
+}
+
+static struct rchan_callbacks fp_relay_callbacks = {
+	.create_buf_file = create_buf_file_handler,
+	.remove_buf_file = remove_buf_file_handler,
+};
+
+static ssize_t pkt_debug_level_show(struct fastpath_module *m, char *buf)
+{
+	return sprintf(buf, "%d\n", pkt_debug_level);
+}
+
+static ssize_t set_pkt_debug_level(struct fastpath_module *m, const char *buf,
+					  size_t count)
+{
+	unsigned int cmd;
+
+	if (sscanf(buf, "%u", &cmd) != 1)
+		return -EINVAL;
+
+	if (cmd > 3) {
+		pr_debug("Invalid value for pkt_debug_level %u\n", cmd);
+		return -EINVAL;
+	}
+	/* 0: off
+	   1: rx enable
+	   2: tx enable
+	   3: rx+tx enable
+	 */
+	pkt_debug_level = cmd;
+
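+	/*
+	 * A non-zero level opens a relay channel under debugfs (a "fastpath"
+	 * directory with one global 2 MiB sub-buffer); level 0 tears it down.
+	 */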
+	if (cmd > 0) {
+		fp_dir = debugfs_create_dir("fastpath", NULL);
+		if (!fp_dir) {
+			pr_err("debugfs_create_dir fastpath failed.\n");
+			return count;
+		}
+
+		fp_chan = relay_open("pkt_debug", fp_dir, 0x200000, 1,
+				     &fp_relay_callbacks, NULL);
+		if (!fp_chan) {
+			pr_err("relay_open pkt_debug failed.\n");
+			debugfs_remove(fp_dir);
+		}
+	} else if (cmd == 0) {
+		if (fp_chan) {
+			relay_close(fp_chan);
+			fp_chan = NULL;
+			debugfs_remove(fp_dir);
+		}
+	}
+
+	return count;
+}
+
+static FP_ATTR(rx_hook, S_IRUGO|S_IWUSR, rx_hook_show, rx_hook_store);
+static FP_ATTR(tx_hook, S_IRUGO|S_IWUSR, tx_hook_show, tx_hook_store);
+static FP_ATTR(drop_on_busy, S_IRUGO|S_IWUSR, dob_show, dob_store);
+static FP_ATTR(bypass_fastpath, S_IRUGO|S_IWUSR, bypass_show, bypass_store);
+static FP_ATTR(reply_ra, S_IRUGO|S_IWUSR, reply_ra_show, reply_ra_store);
+static FP_ATTR(pkt_debug, S_IRUGO|S_IWUSR, pkt_debug_level_show, set_pkt_debug_level);
+
+static struct attribute *fp_forward_attrs[] = {
+	&fp_attr_rx_hook.attr,
+	&fp_attr_tx_hook.attr,
+	&fp_attr_drop_on_busy.attr,
+	&fp_attr_bypass_fastpath.attr,
+	&fp_attr_reply_ra.attr,
+	&fp_attr_pkt_debug.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+static struct kobj_type ktype_forward = {
+	.sysfs_ops	= &fp_sysfs_ops,
+	.default_attrs	= fp_forward_attrs,
+	.release	= fp_forward_release,
+};
+
+static int fp_forward_probe(struct fastpath_module *module)
+{
+	int ret;
+	struct fp_forward *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		pr_err("no memeory\n");
+		return -ENOMEM;
+	}
+
+	module->priv = priv;
+	snprintf(module->name, sizeof(module->name), "fp_forward");
+	spin_lock_init(&priv->lock);
+
+	if ((fp_forward_rx_hook >= ARRAY_SIZE(rx_hooks)) ||
+	    (fp_forward_tx_hook >= ARRAY_SIZE(tx_hooks))) {
+		pr_err("Invalid hook (rx_hook=%u, tx_hook=%u)\n",
+			fp_forward_rx_hook, fp_forward_tx_hook);
+		ret = -EINVAL;
+		goto priv_kfree;
+	}
+	priv->rx_hook = &rx_hooks[fp_forward_rx_hook];
+	priv->tx_hook = &tx_hooks[fp_forward_tx_hook];
+
+	ret = fp_forward_connect(priv);
+	if (ret < 0) {
+		pr_err("rx connect failed\n");
+		goto priv_kfree;
+	}
+
+	kobject_init(&module->kobj, &ktype_forward);
+	ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+	if (ret < 0) {
+		pr_err("kobject_add failed (%d)\n", ret);
+		goto fp_forward_disconnect;
+	}
+	kobject_uevent(&module->kobj, KOBJ_ADD);
+
+	pr_debug("fp_forward probed\n");
+	return 0;
+
+fp_forward_disconnect:
+	kobject_put(&module->kobj);
+	fp_forward_disconnect(priv);
+priv_kfree:
+	kfree(priv);
+	return ret;
+}
+
+static int fp_forward_remove(struct fastpath_module *module)
+{
+	if (fp_chan) {
+		relay_close(fp_chan);
+		fp_chan = NULL;
+		debugfs_remove(fp_dir);
+	}
+
+	kobject_put(&module->kobj);
+
+	pr_debug("fp_forward removed\n");
+	return 0;
+}
+
+struct fastpath_module_ops fp_forward_ops = {
+	.probe = fp_forward_probe,
+	.remove = fp_forward_remove
+};
+
+module_param(fp_forward_rx_hook, uint, 0);
+MODULE_PARM_DESC(fp_forward_rx_hook, "fastpath forward rx hook (default="
+			__MODULE_STRING(FP_FORWARD_RX_HOOK_DEFAULT) ")");
+module_param(fp_forward_tx_hook, uint, 0);
+MODULE_PARM_DESC(fp_forward_tx_hook, "fastpath forward tx hook (default="
+			__MODULE_STRING(FP_FORWARD_TX_HOOK_DEFAULT) ")");
diff --git a/package/kernel/mfp/files/fp_learner_nc.c b/package/kernel/mfp/files/fp_learner_nc.c
new file mode 100644
index 0000000..1f9c014
--- /dev/null
+++ b/package/kernel/mfp/files/fp_learner_nc.c
@@ -0,0 +1,1750 @@
+/*
+ *	Fastpath Learner
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+#define pr_fmt(fmt) "mfp" " learner:%s:%d: " fmt, __func__, __LINE__
+
+#include <br_private.h>
+#include <net/addrconf.h>
+#include <linux/inetdevice.h>
+#include "fp_common.h"
+#include "fp_database.h"
+#include "fp_device.h"
+#include "fp_core.h"
+#include "fp_netlink.h"
+
+#define RTMGRP_IPV4_ROUTE	0x40
+#define RTMGRP_IPV4_RULE	0x80
+#define RTMGRP_IPV6_ROUTE	0x400
+#define RTNETLINK_GRP (RTMGRP_IPV4_ROUTE | RTMGRP_IPV4_RULE | RTMGRP_IPV6_ROUTE)
+
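+/*
+ * nfnetlink group N is subscribed via bit (N - 1) of nl_groups; map a group
+ * id to its mask bit, or to 0 for out-of-range ids.
+ */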
+#define NFLGRP2MASK(group) ((((group) > NFNLGRP_NONE) && \
+			     ((group) < __NFNLGRP_MAX)) ? \
+			     (0x1UL << ((group) - 1)) : 0)
+
+#define NFNETLINK_GRP					\
+	NFLGRP2MASK(NFNLGRP_CONNTRACK_NEW) |		\
+	NFLGRP2MASK(NFNLGRP_CONNTRACK_UPDATE) |		\
+	NFLGRP2MASK(NFNLGRP_CONNTRACK_DESTROY) |	\
+	NFLGRP2MASK(NFNLGRP_CONNTRACK_EXP_NEW) |	\
+	NFLGRP2MASK(NFNLGRP_CONNTRACK_EXP_UPDATE) |	\
+	NFLGRP2MASK(NFNLGRP_CONNTRACK_EXP_DESTROY)
+
+/* ipv6 special flags always rejected (RTF values > 64K) */
+#define RT6_REJECT_MASK	~(RTF_UP | RTF_GATEWAY | RTF_HOST | \
+			  RTF_REINSTATE | RTF_DYNAMIC | RTF_MODIFIED | \
+			  RTF_DEFAULT | RTF_ADDRCONF | RTF_CACHE)
+
+#define DEFAULT_LOOKUPS_DELAY_MS	(5)
+#define DEFAULT_LOOKUPS_RETRIES	(10)
+
+#define NETIF_INVALID(x) (!(x) || !netif_device_present(x) || \
+	!netif_running(x) || !netif_carrier_ok(x))
+
+static inline struct net_device *
+get_netdev_from_br(struct net_device *br, struct nf_conntrack_tuple *tuple);
+
+static bool fp_learner_wq = FP_LEARNER_WQ_DEFAULT;
+
+struct policy_entry {
+	struct list_head list;
+	unsigned int port;
+};
+
+struct fp_learner {
+	spinlock_t lock;
+	struct list_head work_items_list;
+	struct list_head policy_list;
+	struct workqueue_struct *wq;
+	struct work_struct update_work;
+	struct socket *rt_nl_sock;
+	struct socket *nf_nl_sock;
+	struct notifier_block netdev_notifier;
+	struct notifier_block netevent_notifier;
+	struct notifier_block inet6addr_notifier;
+
+	unsigned int lookups_retries;
+	unsigned int lookups_delay;
+	unsigned int fp_rmmoding;
+};
+
+struct learner_work {
+	struct list_head list;
+	struct fp_learner *priv;
+	struct delayed_work work;
+	/* add new connection data*/
+	struct nf_conn *ct;
+};
+
+struct nf_conn *
+__get_conntrack_from_nlmsg(struct sk_buff *skb, struct nlmsghdr *nlh);
+struct nf_conntrack_expect *
+__get_expect_from_nlmsg(struct sk_buff *skb, struct nlmsghdr *nlh);
+
+void learner_nc_dump_conntrack_tuple(char *msg, struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple *orig_tuple =
+	    &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+
+	struct nf_conntrack_tuple *reply_tuple =
+	    &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+	char buf[MAX_DEBUG_PRINT_SIZE];
+	int len = 0;
+
+	if (msg)
+		len = sprintf(buf, "%s", msg);
+
+	len += sprintf(buf + len, "tuple orig:\n");
+	len += fp_dump_tuple(buf + len, orig_tuple);
+	len += sprintf(buf + len, "\ntuple reply:\n");
+	len += fp_dump_tuple(buf + len, reply_tuple);
+
+	pr_err("%s\n", buf);
+}
+
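+/*
+ * Flows on these well-known control/ALG ports (DNS, DHCP, SIP, FTP control,
+ * PPTP, ...) are never learned into the fastpath; they stay on the slow path
+ * where conntrack helpers can keep inspecting them.
+ */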
+static inline bool policy_check_port(u_int8_t protocol, __be16 port)
+{
+	if (protocol == IPPROTO_UDP) {
+		switch (ntohs(port)) {
+		case 53:	/* DNS */
+		case 67:	/* bootps */
+		case 68:	/* bootpc */
+		case 69:	/* Trivial File Transfer Protocol (TFTP) */
+		case 135:	/* DHCP server, DNS server and WINS. Also used by DCOM */
+		case 137:	/* NetBIOS NetBIOS Name Service */
+		case 138:	/* NetBIOS NetBIOS Datagram Service */
+		case 139:	/* NetBIOS NetBIOS Session Service */
+		case 161:	/* SNMP */
+		case 162:	/* SNMPTRAP */
+		case 199:	/* SMUX, SNMP Unix Multiplexer */
+		case 517:	/* Talk */
+		case 518:	/* NTalk */
+		case 546:	/* DHCPv6 client*/
+		case 547:	/* DHCPv6 server*/
+		case 953:	/* Domain Name System (DNS) RNDC Service */
+		case 1719:	/* H.323 Registration and alternate communication */
+		case 1723:	/* Microsoft Point-to-Point Tunneling Protocol (PPTP) */
+		case 5060:	/* Session Initiation Protocol (SIP) */
+		case 5353:	/* Multicast DNS (mDNS) */
+		case 6566:	/* SANE (Scanner Access Now Easy) */
+		case 20480:	/* emwavemsg (emWave Message Service) */
+			return false;
+		}
+	} else {		/* TCP */
+		switch (ntohs(port)) {
+		case 21:	/* FTP control (command) */
+		case 53:	/* DNS */
+		case 135:	/* DHCP server, DNS server and WINS. Also used by DCOM */
+		case 137:	/* NetBIOS NetBIOS Name Service */
+		case 138:	/* NetBIOS NetBIOS Datagram Service */
+		case 139:	/* NetBIOS NetBIOS Session Service */
+		case 162:	/* SNMPTRAP */
+		case 199:	/* SMUX, SNMP Unix Multiplexer */
+		case 546:	/* DHCPv6 client*/
+		case 547:	/* DHCPv6 server*/
+		case 953:	/* Domain Name System (DNS) RNDC Service */
+		case 1720:	/* H.323 Call signalling */
+		case 1723:	/* Microsoft Point-to-Point Tunneling Protocol (PPTP) */
+		case 5060:	/* Session Initiation Protocol (SIP) */
+		case 6566:	/* SANE (Scanner Access Now Easy) */
+		case 6667:	/* Internet Relay Chat (IRC) */
+		case 20480:	/* emwavemsg (emWave Message Service) */
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool learner_policy_check(struct fp_learner *priv, struct nf_conn *ct)
+{
+	const struct nf_conntrack_l4proto *l4proto;
+	struct policy_entry *itr;
+	struct nf_conntrack_tuple *orig_tuple;
+
+	orig_tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+
+	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+	NF_CT_ASSERT(l4proto);
+
+	if (!l4proto->l4proto)
+		goto fail;
+
+	/* check protocol is UDP/TCP */
+	if (l4proto->l4proto != IPPROTO_UDP &&
+	    l4proto->l4proto != IPPROTO_TCP)
+		goto fail;
+
+	if (!policy_check_port(l4proto->l4proto, orig_tuple->dst.u.all))
+		goto fail;
+
+	if (!policy_check_port(l4proto->l4proto, orig_tuple->src.u.all))
+		goto fail;
+
+	/* Check dynamic policy */
+	spin_lock_bh(&priv->lock);
+	list_for_each_entry(itr, &priv->policy_list, list)
+		if (itr && ((itr->port == ntohs(orig_tuple->dst.u.all)) ||
+		    (itr->port == ntohs(orig_tuple->src.u.all)))) {
+			spin_unlock_bh(&priv->lock);
+			goto fail;
+		}
+	spin_unlock_bh(&priv->lock);
+
+	return true;
+fail:
+	pr_debug("connection %p failed police check\n", ct);
+	return false;
+}
+
+static inline void flowi_init(struct flowi *fl, int iif,
+				__u8 scope, __u8 proto,
+				__be32 daddr, __be32 saddr,
+				__be16 dport, __be16 sport, 
+				__u32 mark)
+{
+	memset(fl, 0, sizeof(*fl));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	fl->flowi_iif = iif;
+	fl->flowi_scope = scope;
+	fl->flowi_proto = proto;
+	fl->u.ip4.daddr = daddr;
+	fl->u.ip4.saddr = saddr;
+	fl->u.ip4.fl4_dport = dport;
+	fl->u.ip4.fl4_sport = sport;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	fl->flowi_mark = mark;
+#endif
+#else
+	fl->iif = iif;
+	fl->fl4_scope = scope;
+	fl->proto = proto;
+	fl->fl4_dst = daddr;
+	fl->fl4_src = saddr;
+	fl->fl_ip_dport = dport;
+	fl->fl_ip_sport = sport;
+#endif
+}
+
+static inline bool invert_tuple(struct nf_conntrack_tuple *inverse,
+				struct nf_conntrack_tuple *orig)
+{
+	return nf_ct_invert_tuple(inverse, orig);
+}
+
+static inline bool ipv6_check_special_addr(const struct in6_addr *addr)
+{
+	int addr_type = ipv6_addr_type(addr);
+	/* TODO: check if we need to filter other types - such as Link Local */
+	return ((addr_type & IPV6_ADDR_MULTICAST) ||
+		(addr_type & IPV6_ADDR_LOOPBACK) ||
+		(addr_type & IPV6_ADDR_ANY));
+}
+
+static struct net_device *fp_get_route_ipv6(struct nf_conn *ct,
+					    struct nf_conntrack_tuple *tuple,
+					    unsigned int *route)
+{
+	struct net_device *dev = NULL;
+	struct flowi6 fl6 = {
+			.flowi6_oif = 0,
+			.daddr = tuple->dst.u3.in6,
+		};
+	int flags = RT6_LOOKUP_F_IFACE;
+	struct fib6_result res = {};
+	int ret = 0;
+
+	if (ipv6_check_special_addr(&tuple->dst.u3.in6) ||
+	    ipv6_check_special_addr(&tuple->src.u3.in6)) {
+		pr_debug("Filter special address (saddr=%pI6c, daddr=%pI6c)\n",
+			&tuple->src.u3.in6, &tuple->dst.u3.in6);
+		return NULL;
+	}
+
+	//if (&tuple->src.u3.in6) {
+		memcpy(&fl6.saddr, &tuple->src.u3.in6, sizeof(tuple->src.u3.in6));
+		flags |= RT6_LOOKUP_F_HAS_SADDR;
+	//}
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	fl6.flowi6_mark = ct->mark;
+#endif
+
+	ret = ip6_route_lookup_fastpath(nf_ct_net(ct), &fl6, &res, flags);
+	if (ret) {
+		pr_debug("rt6_lookup failed\n");
+		goto out;
+	}
+
+	/* check if route is usable*/
+	if (res.fib6_flags & RTF_UP) {
+		if (res.fib6_flags & RT6_REJECT_MASK) {
+			pr_debug("route rejected (rt6i_flags = 0x%08x)\n", res.fib6_flags);
+			goto out;
+		}
+		/* accepted in fastpath */
+		dev = res.nh->fib_nh_dev;
+		*route = res.fib6_flags;
+	}
+
+out:
+	return dev;
+}
+
+static inline bool ipv4_check_special_addr(const __be32 addr)
+{
+	/* Filter multicast, broadcast, loopback and zero net*/
+	return (ipv4_is_loopback(addr) || ipv4_is_multicast(addr) ||
+		ipv4_is_lbcast(addr) || ipv4_is_zeronet(addr));
+}
+
+static inline struct net_device *fp_get_dev_by_ipaddr(struct nf_conntrack_tuple *tuple)
+{
+	struct net *net;
+	struct net_device *dev;
+	struct in_device *in_dev;
+	struct in_ifaddr *ifa;
+
+	for_each_net(net) {
+		for_each_netdev(net, dev) {
+			in_dev = __in_dev_get_rcu(dev);
+			if (!in_dev)
+				continue;
+
+			in_dev_for_each_ifa_rcu(ifa, in_dev) {
+				if (tuple->src.u3.ip == ifa->ifa_local)
+					return dev;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+static struct net_device *fp_get_route_ipv4(struct nf_conn *ct,
+					    struct nf_conntrack_tuple *tuple,
+					    unsigned int *route)
+{
+	struct fib_result res;
+	struct flowi flp;
+	struct net_device *dev = NULL;
+
+	if (ipv4_check_special_addr(tuple->dst.u3.ip) ||
+	    ipv4_check_special_addr(tuple->src.u3.ip)) {
+		pr_debug("Filter special address (saddr=%pI4, daddr=%pI4)\n",
+			&tuple->src.u3.ip, &tuple->dst.u3.ip);
+		return NULL;
+	}
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	flowi_init(&flp, 0, 0, tuple->dst.protonum, tuple->dst.u3.ip,
+			   tuple->src.u3.ip, tuple->src.u.all, tuple->dst.u.all, ct->mark);
+#else
+	flowi_init(&flp, 0, 0, tuple->dst.protonum, tuple->dst.u3.ip,
+			   tuple->src.u3.ip, tuple->src.u.all, tuple->dst.u.all, 0);
+#endif
+
+	rcu_read_lock_bh();
+	if (rt4_lookup(nf_ct_net(ct), &flp, &res) < 0) {
+		pr_debug("Getting route failed\n");
+		rcu_read_unlock_bh();
+		return NULL;
+	}
+
+	if (res.type == RTN_BROADCAST) {
+		pr_err("Route = RTN_BROADCAST\n");
+		goto out;
+	}
+
+	if (res.type == RTN_MULTICAST) {
+		pr_err("Route = RTN_MULTICAST\n");
+		goto out;
+	}
+
+	if (res.type == RTN_LOCAL) {
+		pr_debug("Route = RTN_LOCAL\n");
+		goto out;
+	}
+
+	*route = res.type;
+	dev = res.fi->fib_nh->fib_nh_dev;
+
+	if (NF_CT_NAT(ct)) {
+		struct net_device *nat_dev = fp_get_dev_by_ipaddr(tuple);
+
+		if (nat_dev)
+			dev = nat_dev;
+	}
+out:
+	ip4_rt_put(&res);
+	rcu_read_unlock_bh();
+	return dev;
+}
+
+static struct fp_net_device *fp_get_route(struct nf_conn *ct,
+					   struct nf_conntrack_tuple *tuple,
+					   u32 *route, int retries, int delay)
+{
+	struct fp_net_device *fdev;
+	struct net_device *dev, *br = NULL;
+
+	dev = (tuple->src.l3num == AF_INET6) ?
+				  fp_get_route_ipv6(ct, tuple, route) :
+				  fp_get_route_ipv4(ct, tuple, route);
+	if (!dev)
+		return NULL;
+
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = dev;
+		do {
+			dev = get_netdev_from_br(br, tuple);
+			if (dev)
+				break;
+			if (delay)
+				msleep(delay);
+		} while (retries--);
+
+		if (!dev) {
+			pr_debug("Unable to get net device from bridge IP\n");
+			return NULL;
+		}
+	}
+
+	if (dev->reg_state != NETREG_REGISTERED) {
+		pr_debug("device %s not registred (reg_state=%d)\n", dev->name,
+			  dev->reg_state);
+		return NULL;
+	}
+
+	if (unlikely(NETIF_INVALID(dev)) || !(dev->flags & IFF_UP)) {
+		pr_debug("dev (%s) state invalid (state: %lu) or is not up (flags: 0x%x)\n", dev->name, dev->state, dev->flags);
+		return NULL;
+	}
+
+	fdev = fpdev_get_if(dev);
+	if (!fdev) {
+		pr_err("no fastpath device for %s\n", dev->name);
+		return NULL;
+	}
+
+	fdev->br = br;
+	return fdev;
+}
+
+static inline int ipv4_gw_addr(struct nf_conn *ct, struct net_device *dev,
+				__be32 saddr, __be32 daddr, __be32 *gw)
+{
+	struct fib_result res;
+	int ret = 0;
+	struct flowi flp;
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	flowi_init(&flp, dev->ifindex, RT_SCOPE_UNIVERSE, 0, daddr, saddr, 0, 0, ct->mark);
+#else
+	flowi_init(&flp, dev->ifindex, RT_SCOPE_UNIVERSE, 0, daddr, saddr, 0, 0, 0);
+#endif
+	rcu_read_lock_bh();
+	ret = rt4_lookup(dev_net(dev), &flp, &res);
+	if (ret != 0) {
+		pr_err("rt4_lookup failed, ret = %d\n", ret);
+		rcu_read_unlock_bh();
+		return ret;
+	}
+
+	if (res.type == RTN_BROADCAST || res.type == RTN_MULTICAST ||
+	    res.type == RTN_LOCAL) {
+		pr_debug("gw not found - res.type = %d\n", res.type);
+		ret = -EFAULT;
+	} else {
+		*gw = res.fi->fib_nh->fib_nh_gw4;
+		pr_debug("gw found (%pI4)\n", gw);
+	}
+
+	ip4_rt_put(&res);
+	rcu_read_unlock_bh();
+	return ret;
+}
+
+static inline int ipv6_gw_addr(struct nf_conn *ct, struct net_device *dev, struct in6_addr *saddr,
+				struct in6_addr *daddr, struct in6_addr *gw)
+{
+	int ret = 0;
+	struct flowi6 fl6 = {
+			.flowi6_oif = 0,
+			.daddr = *daddr,
+		};
+	int flags = RT6_LOOKUP_F_IFACE;
+	struct fib6_result res = {};
+
+	if (saddr) {
+		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
+		flags |= RT6_LOOKUP_F_HAS_SADDR;
+	}
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+		fl6.flowi6_mark = ct->mark;
+#endif
+
+	ret = ip6_route_lookup_fastpath(dev_net(dev), &fl6, &res, flags);
+	if (ret) {
+		pr_err("rt6_lookup failed\n");
+		ret = -ENETUNREACH;
+		goto out;
+	}
+
+	/* check if route is usable*/
+	if (res.fib6_flags & RTF_UP) {
+		if (res.nh->fib_nh_gw_family)
+			*gw = res.nh->fib_nh_gw6;
+	} else {
+		pr_debug("gw found but route is not up\n");
+		ret = -EFAULT;
+	}
+
+out:
+	return ret;
+}
+
+/* copied from br_fdb.c */
+static inline
+struct net_bridge_fdb_entry *fp_br_fdb_find(struct hlist_head *head,
+					    const unsigned char *addr)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	hlist_for_each_entry_rcu(fdb, head, fdb_node) {
+		if (ether_addr_equal(fdb->key.addr.addr, addr))
+			return fdb;
+	}
+
+	return NULL;
+}
+
+static inline
+struct net_device *fp_br_get_netdev_by_mac(struct net_bridge *br,
+					   const unsigned char *mac)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	BUG_ON(!br);
+
+	rcu_read_lock_bh();
+	/* fdb_list is a single list, so one lookup suffices */
+	fdb = fp_br_fdb_find(&br->fdb_list, mac);
+	if (fdb) {
+		pr_debug("br: %s fdb: %pM, port: %s\n",
+			 br->dev->name, fdb->key.addr.addr,
+			 fdb->dst->dev->name);
+		rcu_read_unlock_bh();
+		return fdb->dst->dev;
+	}
+	rcu_read_unlock_bh();
+	pr_debug("no match found in fdb (%pM)\n", mac);
+
+	return NULL;
+}
+
+static inline
+struct net_device *get_netdev_from_br(struct net_device *br,
+					struct nf_conntrack_tuple *tuple)
+{
+	struct neighbour *neigh;
+	struct neigh_table *tbl;
+	struct net_device *dev = NULL;
+
+	BUG_ON(!tuple);
+
+	tbl = (tuple->src.l3num == AF_INET6) ? &nd_tbl : &arp_tbl;
+
+	neigh = neigh_lookup(tbl, tuple->dst.u3.all, br);
+	if (neigh) {
+		dev = fp_br_get_netdev_by_mac(netdev_priv(br), neigh->ha);
+		neigh_release(neigh);
+	}
+
+	return dev;
+}
+
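+/*
+ * Pre-build the L2 header cache for an entry: resolve the neighbour of the
+ * tuple's destination (falling back to the route's gateway) and, when the
+ * neighbour is valid, snapshot its hardware header via header_ops->cache().
+ */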
+static int fp_hh_init(struct nf_conn *ct, struct nf_conntrack_tuple *t, 
+				struct fp_net_device *dst, struct hh_cache *hh)
+{
+	struct net_device *dev = dst->br ? dst->br : dst->dev;
+	__be16 prot;
+	struct neighbour *n;
+	const struct header_ops *header_ops;
+
+	if (is_vlan_dev(dev))
+		header_ops = vlan_dev_real_dev(dev)->header_ops;
+	else
+		header_ops = dev->header_ops;
+
+	memset(hh, 0, sizeof(*hh));
+
+	if (!header_ops) {
+		pr_debug("device %s has no header ops\n", dev->name);
+		return 0; /* device does not have L2 header*/
+	}
+
+	if (!header_ops->cache || !header_ops->cache_update) {
+		pr_debug("device %s has no header cache ops\n", dev->name);
+		return -ENOTSUPP;
+	}
+
+	if (t->src.l3num == AF_INET) {
+		__be32 gw;
+		prot = htons(ETH_P_IP);
+
+		n = __ipv4_neigh_lookup(dev, t->dst.u3.ip);
+		if (!n) {
+			if (ipv4_gw_addr(ct, dev, t->src.u3.ip, t->dst.u3.ip, &gw))
+				goto not_found;
+			n = __ipv4_neigh_lookup(dev, gw);
+			if (!n)
+				goto not_found;
+		}
+	} else if (t->src.l3num == AF_INET6) {
+		struct in6_addr gw6 = {};	/* stays :: if route has no gw */
+		prot = htons(ETH_P_IPV6);
+
+		n = __ipv6_neigh_lookup(dev, &t->dst.u3.in6);
+		if (!n) {
+			if (ipv6_gw_addr(ct, dev, &t->src.u3.in6, &t->dst.u3.in6, &gw6))
+				goto not_found;
+			/* retry with the gateway address, as in the IPv4 path */
+			n = __ipv6_neigh_lookup(dev, &gw6);
+			if (!n)
+				goto not_found;
+		}
+	} else {
+		BUG();
+	}
+
+	if (n->nud_state & NUD_VALID) {
+		int err = header_ops->cache(n, hh, prot);
+		neigh_release(n);
+		pr_debug("device %s hh_cache initialized: hh_len=%d, hh_data=%pM\n",
+			dev->name, hh->hh_len, hh->hh_data);
+		return err;
+	}
+
+	pr_debug("neighbour state invalid (%02x)\n", n->nud_state);
+	neigh_release(n);
+not_found:
+	/* we get here in 2 cases, neither of which is an error:
+	 * 1. Neighbour lookup failed - we will be notified once the
+	 *    neighbour is finally created
+	 * 2. Neighbour state not valid - we will be notified when the
+	 *    neighbour state changes
+	 * Both are handled by netdev_event, where the entry's hh_cache is
+	 * updated. Until that happens, all packets matching this entry are
+	 * classified as slow by the fp_classifier.
+	 */
+	pr_debug("No neighbour found or neighbour state invalid\n");
+	return 0;
+}
+
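+/*
+ * Translate one direction of a conntrack into a fastpath database entry:
+ * invert the tuples for NAT, route both directions to fastpath devices,
+ * pre-build the L2 header cache and fill in the entry. Returns NULL when
+ * the connection cannot (or must not) be accelerated.
+ */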
+static struct fpdb_entry *connection_to_entry(struct fp_learner *priv,
+						struct nf_conn *ct,
+						enum ip_conntrack_dir dir,
+						gfp_t flags)
+{
+	struct fp_net_device *dst = NULL, *src = NULL;
+	struct nf_conntrack_tuple *orig_tuple, *reply_tuple;
+	struct nf_conntrack_tuple orig_tuple_inverse, reply_tuple_inverse;
+	struct fpdb_entry *entry;
+	struct hh_cache hh;
+	unsigned int in_route_type, out_route_type;
+	int retries = flags != GFP_ATOMIC ? priv->lookups_retries : 0;
+	int delay = flags != GFP_ATOMIC ? priv->lookups_delay : 0;
+
+	if (unlikely(priv->fp_rmmoding))
+		goto failed;
+
+	/* For reply connections -> switch tuples */
+	if (dir == IP_CT_DIR_REPLY) {
+		orig_tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+		reply_tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	} else {
+		orig_tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+		reply_tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+	}
+
+	if (NF_CT_NAT(ct)) {
+		if (!invert_tuple(&orig_tuple_inverse, orig_tuple)) {
+			pr_err("Inverting tuple failed\n");
+			goto failed;
+		}
+
+		if (!invert_tuple(&reply_tuple_inverse, reply_tuple)) {
+			pr_err("Inverting tuple failed\n");
+			goto failed;
+		}
+
+		orig_tuple = &reply_tuple_inverse;
+		reply_tuple = &orig_tuple_inverse;
+		pr_debug( "NAT connection was detected\n");
+	}
+
+	/* Check destination route */
+	dst = fp_get_route(ct, orig_tuple, &in_route_type, retries, delay);
+	if (!dst) {
+		pr_debug("Connection routing failed\n");
+		goto failed;
+	}
+
+	/* Check source route */
+	src = fp_get_route(ct, reply_tuple, &out_route_type, retries, delay);
+	if (!src) {
+		pr_debug("Connection routing failed (local)\n");
+		goto failed;
+	}
+
+	if (fp_hh_init(ct, orig_tuple, dst, &hh)) {
+		pr_debug("fp_hh_init failed \n");
+		goto failed;
+	}
+
+	entry = fpdb_alloc(flags);
+	if (!entry) {
+		pr_debug("Allocating entry failed\n");
+		goto failed;
+	}
+
+	/* Restore the original tuples */
+	if (dir == IP_CT_DIR_REPLY) {
+		orig_tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+		reply_tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	} else {
+		orig_tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+		reply_tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+	}
+
+	/*
+	 * If the interface is going down and we are updating an entry
+	 * referring to it, we might accidentally route the connection back
+	 * to its source device. Block this entry until it is updated again.
+	 */
+	if (src->dev == dst->dev) {
+		pr_debug("Connection created with src == dst for (%s)\n",
+			 src->dev->name);
+		entry->block = 1;
+	}
+
+	/* Fill in entry */
+	entry->dir = dir;
+	entry->in_tuple = *orig_tuple;
+	entry->out_tuple = *reply_tuple;
+	entry->ct = ct;
+	entry->out_dev = dst;
+	entry->in_dev = src;
+	entry->hit_counter = 0;
+	entry->debug.in_route_type = in_route_type;
+	entry->debug.out_route_type = out_route_type;
+	entry->hh = hh;
+
+	/* Success */
+	pr_debug("connection added (ct=%p, dir=%d)\n", ct, dir);
+	FP_DEBUG_DUMP_CONTRACK(NULL, ct);
+	return entry;
+
+failed:
+	/* Failed */
+	fpdev_put(src);
+	fpdev_put(dst);
+	pr_debug("connection refused (ct=%p, dir=%d)\n", ct, dir);
+	FP_DEBUG_DUMP_CONTRACK(NULL, ct);
+	return NULL;
+}
+
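+/*
+ * Attach fastpath state to a conntrack: the first call creates the
+ * ORIGINAL-direction entry plus the NF_CT_EXT_FASTPATH extension; once a
+ * reply has been seen, a later call adds the REPLY-direction entry.
+ */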
+static inline int __add_new_connection(struct fp_learner *priv,
+				       struct nf_conn *ct, gfp_t flags)
+{
+	struct nf_conn_fastpath *fastpath = nfct_fastpath(ct);
+	struct fpdb_entry *e;
+
+	rcu_read_lock_bh();
+	/* original fastpath connection */
+	if (!fastpath) {
+		e = connection_to_entry(priv, ct, IP_CT_DIR_ORIGINAL, flags);
+		if (!e) {
+			rcu_read_unlock_bh();
+			return -EINVAL;
+		}
+
+		set_bit(IPS_FASTPATH_BIT, &ct->status);
+		fastpath = nf_ct_ext_add(ct, NF_CT_EXT_FASTPATH, flags);
+		BUG_ON(!fastpath);
+
+		fastpath->fpd_el[IP_CT_DIR_ORIGINAL] = e;
+		fastpath->fpd_el[IP_CT_DIR_REPLY] = NULL;
+		fpdb_add(e);
+		goto del_entry;
+	}
+
+	/* reply fastpath connection */
+	BUG_ON(!test_bit(IPS_FASTPATH_BIT, &ct->status));
+	if (fastpath->fpd_el[IP_CT_DIR_REPLY] == NULL &&
+		test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+		e = connection_to_entry(priv, ct, IP_CT_DIR_REPLY, flags);
+		if (!e) {
+			rcu_read_unlock_bh();
+			return -EINVAL;
+		}
+
+		fastpath->fpd_el[IP_CT_DIR_REPLY] = e;
+		fpdb_add(e);
+		goto del_entry;
+	}
+	rcu_read_unlock_bh();
+	return 0;
+
+del_entry:
+	if (unlikely((NETIF_INVALID(e->in_dev->dev)) || 
+		!(e->in_dev->dev->flags & IFF_UP)  || priv->fp_rmmoding)) {
+		pr_err("in_dev (%s) state invalid or is rmmoding, del entry!\n", e->in_dev->dev->name);
+		fpdb_del_by_dev(e->in_dev->dev);
+	}
+
+	if (unlikely((NETIF_INVALID(e->out_dev->dev)) || 
+		!(e->out_dev->dev->flags & IFF_UP) || priv->fp_rmmoding)) {
+		pr_err("out_dev (%s) state invalid or is rmmoding, del entry!\n", e->out_dev->dev->name);
+		fpdb_del_by_dev(e->out_dev->dev);
+	}
+	rcu_read_unlock_bh();
+	return 0;
+}
+
+static void new_connection_work(struct work_struct *w)
+{
+	struct learner_work *work;
+
+	work = container_of(w, struct learner_work, work.work);
+	BUG_ON(!work);
+
+	__add_new_connection(work->priv, work->ct, GFP_KERNEL);
+
+	/* release work */
+	spin_lock_bh(&work->priv->lock);
+	list_del(&work->list);
+	spin_unlock_bh(&work->priv->lock);
+	kfree(work);
+}
+
+static inline int add_new_connection_work(struct fp_learner *priv,
+					   struct nf_conn *ct)
+{
+	struct learner_work *work;
+
+	if (!learner_policy_check(priv, ct))
+		return -EINVAL;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+
+	work->ct = ct;
+	work->priv = priv;
+	INIT_LIST_HEAD(&work->list);
+	INIT_DELAYED_WORK(&work->work, new_connection_work);
+
+	spin_lock_bh(&priv->lock);
+	list_add_tail(&work->list, &priv->work_items_list);
+	spin_unlock_bh(&priv->lock);
+
+	queue_delayed_work(priv->wq, &work->work, 0);
+
+	return 0;
+}
+
+static inline int add_new_connection_noblock(struct fp_learner *priv,
+					      struct nf_conn *ct)
+{
+	if (!learner_policy_check(priv, ct))
+		return -EINVAL;
+	return __add_new_connection(priv, ct, GFP_ATOMIC);
+}
+
+static inline int add_new_connection(struct fp_learner *priv,
+				      struct nf_conn *ct)
+{
+	if (fp_learner_wq)
+		return add_new_connection_work(priv, ct);
+	else
+		return add_new_connection_noblock(priv, ct);
+}
+
+/* check if this connection is waiting in our workqueue
+ * and cancel it if it is.
+ */
+static inline int
+new_connection_cancel(struct fp_learner *priv, struct nf_conn *ct)
+{
+	struct learner_work *work;
+
+	if (!fp_learner_wq)
+		return 0;
+
+	spin_lock_bh(&priv->lock);
+	list_for_each_entry(work, &priv->work_items_list, list) {
+		if (work->ct == ct) {
+			if (cancel_delayed_work(&work->work)) {
+				pr_debug("cancle connection add %p\n", ct);
+				list_del(&work->list);
+				kfree(work);
+			}
+			break;
+		}
+	}
+	spin_unlock_bh(&priv->lock);
+
+	return 0;
+}
+
+static int learner_ct_event(struct fp_learner *priv, struct nf_conn *ct,
+			     unsigned int type, unsigned int flags)
+{
+	if (type == IPCTNL_MSG_CT_DELETE) {
+		pr_debug("delete connection (%p)\n", ct);
+		return new_connection_cancel(priv, ct);
+	} else if (type == IPCTNL_MSG_CT_NEW) {
+		pr_debug("new connection (%p)\n", ct);
+		return add_new_connection(priv, ct);
+	}
+
+	pr_debug("Unhandled type=%u\n", type);
+	FP_DEBUG_DUMP_CONTRACK(NULL, ct);
+
+	return -ENOTSUPP;
+}
+
+static int fpdev_del_gb6(struct net_device *dev)
+{
+	struct fp_net_device *fpdev;
+
+	fpdev = fpdev_get_if(dev);
+	if (unlikely(!fpdev))
+		return 0;
+
+	memset(&fpdev->ll6addr, 0, sizeof(struct in6_addr));
+	memset(&fpdev->gb6addr, 0, sizeof(struct in6_addr));
+	fpdev->prefixlen = 0;
+	fpdev->mtu = 0;
+	fpdev_clear_ll6(fpdev);
+	fpdev_clear_gb6(fpdev);
+	fpdev_clear_mtu(fpdev);
+
+	fpdev_put(fpdev);
+
+	return 0;
+}
+
+/**
+ * handle netdevice events.
+ *
+ * NETDEV_REGISTER
+ * A new net_device is registered. A fastpath device is created
+ * and associated with it.
+ *
+ * NETDEV_UNREGISTER
+ * A net_device is unregistered; delete the associated fastpath device.
+ * In addition, remove all conntracks related to this device - this causes
+ * all the related fastpath database entries to be deleted, allowing the
+ * device to be safely removed.
+ *
+ * @note We can safely ignore NETDEV_UP / NETDEV_DOWN since they are
+ *       checked in the classifier anyway. Other events will be added
+ *       in the future if needed.
+ * @param dev
+ * @param event
+ *
+ * @return NOTIFY_DONE
+ */
+static int
+__learner_netdev_event(struct net_device *dev, unsigned long event)
+{
+	switch (event) {
+	case NETDEV_REGISTER:
+		pr_debug("received netdev (%s) register, event %lu, state: 0x%lx, flags: 0x%x, invalid: %d\n",
+			dev->name, event, dev->state, dev->flags, NETIF_INVALID(dev));
+		fpdev_add_if(dev);
+		break;
+	case NETDEV_UNREGISTER:
+		printk(KERN_DEBUG "received netdev (%s) unregister, event %lu, state: 0x%lx, flags: 0x%x, invalid: %d\n",
+			dev->name, event, dev->state, dev->flags, NETIF_INVALID(dev));
+		fpdb_del_by_dev(dev);
+		fpdb_iterate(&fpdb_del_block_entry_by_dev, (void *)dev);
+		fpdev_del_if(dev);
+		break;
+	case NETDEV_DOWN:
+		fpdev_del_gb6(dev);
+		break;
+	default:
+		pr_debug("ignoring netdev %s event %lu, state: 0x%lx, flags: 0x%x, invalid: %d\n",
+			dev->name, event, dev->state, dev->flags, NETIF_INVALID(dev));
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* main dispatcher for netdev events - bridge and loopback ignored */
+static int learner_netdev_event(struct notifier_block *nb,
+				    unsigned long event, void *ptr)
+{
+	struct net_device *dev;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+	dev = ptr;
+#else
+	dev = ((struct netdev_notifier_info*)ptr)->dev;
+#endif
+
+	if ((dev->priv_flags & IFF_EBRIDGE) || (dev->flags & IFF_LOOPBACK))
+		return NOTIFY_DONE;
+
+	return __learner_netdev_event(dev, event);
+}
+
+static void learner_netdev_cleanup(struct notifier_block *nb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+	struct net_device *dev;
+	struct net *net;
+
+	rtnl_lock();
+	for_each_net(net) {
+		for_each_netdev(net, dev) {
+			if (dev->flags & IFF_UP) {
+				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+				nb->notifier_call(nb, NETDEV_DOWN, dev);
+			}
+			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
+		}
+	}
+	rtnl_unlock();
+#endif
+}
+
+static int fp_inet6addr_event(struct notifier_block *nb,
+				  unsigned long event, void *ptr)
+{
+	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+	struct net_device *dev = ifa->idev->dev;
+	struct fp_net_device *fpdev;
+	int addr_type;
+
+	if (event != NETDEV_UP)
+		return NOTIFY_DONE;
+
+	addr_type = ipv6_addr_type(&ifa->addr);
+	if (!(addr_type & IPV6_ADDR_LINKLOCAL))
+		return NOTIFY_DONE;
+
+	fpdev = fpdev_get_if(dev);
+	if (unlikely(!fpdev))
+		return NOTIFY_DONE;
+
+	if (!fpdev_is_ll6_set(fpdev)) {
+		memcpy(&fpdev->ll6addr, &ifa->addr, sizeof(ifa->addr));
+		fpdev_set_ll6(fpdev);
+	}
+
+	fpdev_put(fpdev);
+
+	return NOTIFY_DONE;
+}
+
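+/*
+ * Re-resolve an existing entry against the current routing/neighbour state.
+ * If the connection no longer routes (e.g. it became local), block the entry
+ * rather than dropping it; otherwise replace it in the database whenever the
+ * devices, tuples or header cache changed.
+ */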
+static int update_entry(struct fpdb_entry *e, void *data)
+{
+	struct fpdb_entry *ne = NULL;
+	struct fp_learner *fpl = (struct fp_learner *)data;
+	struct nf_conn_fastpath *fastpath;
+
+	spin_lock_bh(&e->lock);
+	fpdb_lock_bh();
+
+	/* Exit if the CT was destroyed, so fpdb doesn't use stale ct info */
+	if (e->state == ENTRY_DYING)
+		goto done;
+
+	fastpath = nfct_fastpath(e->ct);
+	if (unlikely(!fastpath))
+		goto done;
+
+	ne = connection_to_entry(fpl, e->ct, e->dir, GFP_ATOMIC);
+	if (!ne) {
+		/* The connection may have become local, but we do not want
+		 * to remove it from the stack, so just block it */
+		e->block = 1;
+	} else {
+		if (ne->out_dev == e->out_dev &&
+		    ne->in_dev == e->in_dev &&
+		    nf_ct_tuple_equal(&ne->in_tuple, &e->in_tuple) &&
+		    nf_ct_tuple_equal(&ne->out_tuple, &e->out_tuple) &&
+		    !memcmp(&ne->hh, &e->hh, sizeof(struct hh_cache))) {
+			pr_debug("new fp entry equal old,no update\n");
+			fpdb_free(ne);
+			goto done;
+		}
+
+		/* if the old connection is blocked keep it blocked */
+		/* if ne->block is 1 and e->block is 0, there will be issue --yhuang 20160617*/
+		if (ne->block != 1)
+			ne->block = e->block;
+
+		if (ne->dir == IP_CT_DIR_REPLY)
+			fastpath->fpd_el[IP_CT_DIR_REPLY] = ne;
+		else
+			fastpath->fpd_el[IP_CT_DIR_ORIGINAL] = ne;
+
+		fpdb_replace(e, ne);
+	}
+
+done:
+	fpdb_unlock_bh();
+	spin_unlock_bh(&e->lock);
+
+	if (ne) {
+		if (unlikely((NETIF_INVALID(ne->in_dev->dev)) ||
+		    !(ne->in_dev->dev->flags & IFF_UP) || fpl->fp_rmmoding)) {
+			pr_err("in_dev (%s) state invalid or rmmoding, del!\n",
+				ne->in_dev->dev->name);
+			fpdb_del_by_dev(ne->in_dev->dev);
+		}
+
+		if (unlikely((NETIF_INVALID(ne->out_dev->dev)) ||
+			!(ne->out_dev->dev->flags & IFF_UP) || fpl->fp_rmmoding)) {
+			pr_err("out_dev (%s) state invalid or rmmoding, del!\n",
+				ne->out_dev->dev->name);
+			fpdb_del_by_dev(ne->out_dev->dev);
+		}
+	}
+	return 0;
+}
+
+static int block_entry(struct fpdb_entry *e, void *ptr)
+{
+	spin_lock_bh(&e->lock);
+	e->block = 1;
+	spin_unlock_bh(&e->lock);
+
+	return 0;
+}
+
+static void learner_ct_update_work(struct work_struct *work)
+{
+	struct fp_learner *fpl = container_of(work,
+		struct fp_learner, update_work);
+
+	fpdb_iterate(&update_entry, (void *)fpl);
+}
+
+void __learner_ct_update_all(struct fp_learner *fpl)
+{
+	schedule_work(&fpl->update_work);
+}
+
+static int learner_netevent(struct notifier_block *nb, unsigned long event, void *ctx)
+{
+	struct fp_learner *fpl = container_of(nb, struct fp_learner, netevent_notifier);
+
+	BUG_ON(!fpl);
+
+	if (event == NETEVENT_NEIGH_UPDATE) {
+		struct neighbour *n = ctx;
+
+		pr_debug("neighbor update received (state=%d, dev=%s)\n",
+			n->nud_state, n->dev->name);
+		__learner_ct_update_all(fpl);
+	} else if (event == NETEVENT_REDIRECT) {
+
+		__learner_ct_update_all(fpl);
+		pr_debug("neighbor redirect received\n");
+	} else {
+		pr_debug("mfp received netevent %lu, which no need to update ct\n", event);
+		WARN_ON(1);
+	}
+
+	return 0;
+}
+
+static void learner_rtnetlink_rcv(struct nlmsghdr *nlh, void *ptr)
+{
+	struct fp_learner *fpl = (struct fp_learner *)ptr;
+	switch (nlh->nlmsg_type) {
+	case RTM_NEWROUTE:
+	case RTM_DELROUTE:
+		pr_debug( "%s\n",
+		    nlh->nlmsg_type == RTM_NEWROUTE ? "RTM_NEWROUTE" :
+		    "RTM_DELROUTE");
+		__learner_ct_update_all(fpl);
+		break;
+	case RTM_NEWTFILTER:
+		pr_debug( "RTM_NEWTFILTER\n");
+		/* TODO: check if we need update in this case*/
+		break;
+	case RTM_DELTFILTER:
+		pr_debug( "RTM_DELTFILTER\n");
+		/* TODO: check if we need update in this case*/
+		break;
+	case RTM_GETTFILTER:
+		pr_debug( "RTM_GETTFILTER\n");
+		break;
+	}
+	pr_debug("handle routing netlink message, type=%d\n", nlh->nlmsg_type);
+	/* TODO: add support for update_all_connections */
+}
+
+static void learner_nfnetlink_rcv(struct sk_buff *skb,
+	struct nlmsghdr *nlh, void *ptr)
+{
+	struct fp_learner *priv = (struct fp_learner *)ptr;
+	unsigned int type = NFNL_MSG_TYPE(nlh->nlmsg_type);
+	unsigned int ssid = NFNL_SUBSYS_ID(nlh->nlmsg_type);
+	struct nf_conn *ct;
+	struct nf_conntrack_expect *exp;
+	unsigned int flags = nlh->nlmsg_flags;
+	int ret;
+
+	if (ssid == NFNL_SUBSYS_CTNETLINK) {
+		ct = __get_conntrack_from_nlmsg(skb, nlh);
+		if (ct == NULL) {
+			pr_debug("can't get nf conn type=%u, ssid=%u\n", type, ssid);
+			return;
+		}
+		pr_debug("found CTNETLINK connection %p, type=%u, ssid=%u\n", ct, type, ssid);
+	} else if (ssid == NFNL_SUBSYS_CTNETLINK_EXP) {
+		exp = __get_expect_from_nlmsg(skb, nlh);
+		if (exp == NULL) {
+			pr_err("can't get expect\n");
+			return;
+		}
+		ct = exp->master;
+		pr_debug("found CTNETLINK_EXP exp %p, master connection %p, type=%u, ssid=%u\n", exp, ct, type, ssid);
+	} else {
+		pr_err("unexpected ssid (%d)\n", ssid);
+		return;
+	}
+
+	/* dispatch events */
+	ret = learner_ct_event(priv, ct, type, flags);
+	if (ret < 0)
+		pr_debug("learner_ct_event failed with error code %d\n"
+			 "ct=%p, type=%u, flags=%u\n", ret, ct, type, flags);
+}
+
+/* Receive message from netlink and pass information to relevant function. */
+static void learner_nl_data_ready(struct sock *sk)
+{
+	int ret = 0;
+	int len;
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+
+	BUG_ON(!sk);
+	pr_debug("got a message (socket protocol=%d)\n", sk->sk_protocol);
+
+	while ((skb = skb_recv_datagram(sk, 0, 1, &ret)) == NULL) {
+		if (ret == -EAGAIN || ret == -ENOBUFS) {
+			pr_err("recvfrom() error %d\n", -ret);
+			return;
+		}
+	}
+
+	len = skb->len;
+	for (nlh = (struct nlmsghdr *)skb->data; NLMSG_OK(nlh, len);
+	    nlh = NLMSG_NEXT(nlh, len)) {
+		pr_debug("nlmsg_len %u, nlmsg_type %u\n", nlh->nlmsg_len, nlh->nlmsg_type);
+
+		/* Finish of reading. */
+		if (nlh->nlmsg_type == NFNL_MSG_TYPE(NLMSG_DONE))
+			goto out;
+
+		/* Error handling. */
+		if (nlh->nlmsg_type == NFNL_MSG_TYPE(NLMSG_ERROR)) {
+			pr_err("nl message error\n");
+			goto out;
+		}
+
+		if (sk->sk_protocol == NETLINK_ROUTE) {
+			learner_rtnetlink_rcv(nlh, sk->sk_user_data);
+		} else if (sk->sk_protocol == NETLINK_NETFILTER) {
+			learner_nfnetlink_rcv(skb, nlh, sk->sk_user_data);
+		} else {
+			pr_err("unrecognized sk_protocol (%u)\n", sk->sk_protocol);
+			goto out;
+		}
+	}
+out:
+	skb_orphan(skb);
+	kfree_skb(skb);
+	return;
+}
+
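+/*
+ * The learner listens from kernel space: open an in-kernel netlink socket,
+ * bind it to the given multicast groups and redirect sk_data_ready to our
+ * handler, so rtnetlink/ctnetlink events are consumed without a userspace
+ * daemon.
+ */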
+static int learner_nl_open(void *priv, struct socket **s, int proto, int groups)
+{
+	struct socket *sock;
+	struct sockaddr_nl addr;
+	int rc, val = 1;
+
+	rc = sock_create_kern(&init_net, AF_NETLINK , SOCK_RAW, proto, &sock);
+	if (rc < 0) {
+		pr_err("create err (rc=%d)\n", rc);
+		return rc;
+	}
+
+	memset((void *)&addr, 0, sizeof(addr));
+	addr.nl_family = AF_NETLINK;
+	addr.nl_pid = 0;
+	addr.nl_groups = groups;
+	sock->sk->sk_user_data = priv;
+	sock->sk->sk_data_ready = learner_nl_data_ready;
+	sock->sk->sk_allocation = GFP_ATOMIC;
+
+	rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (rc < 0) {
+		pr_err("bind err (rc=%d)\n", rc);
+		goto sock_err;
+	}
+
+	rc = kernel_setsockopt(sock, SOL_NETLINK, NETLINK_NO_ENOBUFS, (char *)&val, sizeof(val));
+	if (rc < 0) {
+		pr_err("setsockopt err (rc=%d)", rc);
+		goto sock_err;
+	}
+
+	pr_debug("netlink socket opened (proto=%u, groups=%u)\n", proto, groups);
+	*s = sock;
+	return 0;
+
+sock_err:
+	kernel_sock_shutdown(sock, SHUT_RDWR);
+	sock_release(sock);
+	return rc;
+}
+
+static void learner_nl_close(struct socket *sk)
+{
+	BUG_ON(!sk);
+	kernel_sock_shutdown(sk, SHUT_RDWR);
+	sock_release(sk);
+}
+
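+/*
+ * POST_ROUTING hook used only to unblock entries: an entry blocked earlier
+ * (e.g. because src == dst) is released once a slow-path packet traverses it
+ * with distinct, valid source and destination devices.
+ */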
+static unsigned int fp_learner_nf_hook(void *priv,
+			       struct sk_buff *skb,
+			       const struct nf_hook_state *state)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+	struct nf_conn_fastpath *f;
+	struct fpdb_entry *e;
+	struct net_device *el_src, *el_dst;
+
+	if (!ct)
+		goto out;
+
+	f = nfct_fastpath(ct);
+	if (!f)
+		goto out;
+
+	rcu_read_lock_bh();
+	e = rcu_dereference(f->fpd_el[CTINFO2DIR(ctinfo)]);
+	/* Here we can not clear block simply. We need to check */
+	/* whether src dev equals to dst dev yhuang 20160622*/
+	if (unlikely(e && (e->block == true))) {
+		if ((e->state != ENTRY_ALIVE)
+			|| (e->dir >= IP_CT_DIR_MAX)) {
+			rcu_read_unlock_bh();
+			goto out;
+		}
+
+		/* skb->dev already set to destiniation device */
+		el_src = e->in_dev->dev;
+		el_dst = e->out_dev->dev;
+		if(unlikely((el_src == NULL) || (el_dst == NULL))) {
+			rcu_read_unlock_bh();
+			goto out;
+		}
+		if (unlikely(NETIF_INVALID(el_src) || NETIF_INVALID(el_dst))) {
+			rcu_read_unlock_bh();
+			goto out;
+		}
+
+		if (likely(el_src != el_dst)) {
+			spin_lock_bh(&e->lock);
+			e->block = 0;
+			spin_unlock_bh(&e->lock);
+			pr_debug("unblock entry:ct=%x dir=%d bucket=%x %x %x\n",
+				(unsigned int)e->ct, e->dir, e->bucket,
+				(unsigned int)(&e->in_tuple),
+				(unsigned int)(&e->out_tuple));
+		}
+	}
+	rcu_read_unlock_bh();
+out:
+	return NF_ACCEPT;
+}
+
+/** learner's netfilter hook */
+static struct nf_hook_ops nf_learner_hook_data[] __read_mostly = {
+	{
+		.hook = fp_learner_nf_hook,
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+	{
+		.hook = fp_learner_nf_hook,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP_PRI_LAST,
+	},
+};
+
+void __learner_ct_block_all(struct fastpath_module *m)
+{
+	fpdb_iterate(&block_entry, m->priv);
+}
+
+static ssize_t
+learner_ct_update_all(struct fastpath_module *m, const char *buf, size_t count)
+{
+	__learner_ct_update_all((struct fp_learner *)m->priv);
+	return count;
+}
+
+static ssize_t
+learner_ct_block_all(struct fastpath_module *m, const char *buf, size_t count)
+{
+	__learner_ct_block_all(m);
+	return count;
+}
+
+static ssize_t learner_policy_show(struct fastpath_module *m, char *buf)
+{
+	struct policy_entry *itr;
+	struct fp_learner *priv = m->priv;
+	int len;
+
+	len = scnprintf(buf, PAGE_SIZE, "dynamic policy restricted ports:\n");
+
+	spin_lock_bh(&priv->lock);
+	list_for_each_entry(itr, &priv->policy_list, list)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d, ", itr->port);
+	spin_unlock_bh(&priv->lock);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+	return len;
+}
+
+static ssize_t learner_policy_store(struct fastpath_module *m,
+				     const char *buf, size_t count)
+{
+	char op;
+	unsigned int port;
+	struct policy_entry *entry, *tmp;
+	struct fp_learner *priv = m->priv;
+	if (sscanf(buf, "%c%u", &op, &port) != 2 || port > 0xFFFF)
+		return -EINVAL;
+	pr_err("Enter learner_policy_store:op=%c\n", op);
+	if (op == '-') {
+		/* remove port from the restricted list*/
+		spin_lock_bh(&priv->lock);
+		list_for_each_entry_safe(entry, tmp, &priv->policy_list, list)
+			if (entry && entry->port == port) {
+				list_del(&entry->list);
+				kfree(entry);
+			}
+		spin_unlock_bh(&priv->lock);
+	} else if (op == '+') {
+		/* add port to the restricted list*/
+		entry = kzalloc(sizeof(struct policy_entry), GFP_KERNEL);
+		if (!entry)
+			return -ENOMEM;
+
+		INIT_LIST_HEAD(&entry->list);
+		entry->port = port;
+
+		spin_lock_bh(&priv->lock);
+		list_add(&entry->list, &priv->policy_list);
+		spin_unlock_bh(&priv->lock);
+
+		fpdb_del_by_port(port);
+	} else {
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t learner_lookup_retries_store(struct fastpath_module *m,
+					     const char *buf, size_t count)
+{
+	unsigned int retries;
+	struct fp_learner *priv = m->priv;
+
+	if (sscanf(buf, "%u", &retries) != 1)
+		return -EINVAL;
+
+	priv->lookups_retries = retries;
+
+	return count;
+}
+
+static ssize_t learner_lookup_retries_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_learner *priv = m->priv;
+	return scnprintf(buf, PAGE_SIZE, "%u\n", priv->lookups_retries);
+}
+
+static ssize_t learner_lookup_delay_store(struct fastpath_module *m,
+					   const char *buf, size_t count)
+{
+	unsigned int delay;
+	struct fp_learner *priv = m->priv;
+
+	if (sscanf(buf, "%u", &delay) != 1)
+		return -EINVAL;
+
+	priv->lookups_delay = delay;
+
+	return count;
+}
+
+static ssize_t learner_lookup_delay_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_learner *priv = m->priv;
+	return scnprintf(buf, PAGE_SIZE, "%u[ms]\n", priv->lookups_delay);
+}
+
+static ssize_t learner_nfnl_groups_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_learner *priv = m->priv;
+	struct sockaddr addr;
+	int rc;
+
+	rc = kernel_getsockname(priv->nf_nl_sock, &addr);
+	if (rc < 0)
+		return scnprintf(buf, PAGE_SIZE, "ERROR\n");
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", ((struct sockaddr_nl *)&addr)->nl_groups);
+}
+
+static ssize_t learner_rtnl_groups_show(struct fastpath_module *m, char *buf)
+{
+	struct fp_learner *priv = m->priv;
+	struct sockaddr addr;
+	int rc;
+
+	rc = kernel_getsockname(priv->rt_nl_sock, &addr);
+	if (rc < 0)
+		return scnprintf(buf, PAGE_SIZE, "ERROR\n");
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", ((struct sockaddr_nl *)&addr)->nl_groups);
+}
+
+static FP_ATTR(policy, S_IRUGO|S_IWUSR, learner_policy_show, learner_policy_store);
+static FP_ATTR(lookup_retries, S_IRUGO|S_IWUSR, learner_lookup_retries_show, learner_lookup_retries_store);
+static FP_ATTR(lookup_delay, S_IRUGO|S_IWUSR, learner_lookup_delay_show, learner_lookup_delay_store);
+static FP_ATTR(nfnl_groups, S_IRUGO, learner_nfnl_groups_show, NULL);
+static FP_ATTR(rtnl_groups, S_IRUGO, learner_rtnl_groups_show, NULL);
+static FP_ATTR(update, S_IWUSR, NULL, learner_ct_update_all);
+static FP_ATTR(block, S_IWUSR, NULL, learner_ct_block_all);
+
+static struct attribute *fp_learner_attrs[] = {
+	&fp_attr_policy.attr,
+	&fp_attr_lookup_retries.attr,
+	&fp_attr_lookup_delay.attr,
+	&fp_attr_nfnl_groups.attr,
+	&fp_attr_rtnl_groups.attr,
+	&fp_attr_update.attr,
+	&fp_attr_block.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+static int
+fp_learner_ioctl(struct fastpath_module *m, unsigned int cmd, void *data)
+{
+	BUG_ON(!m);
+
+	switch (cmd) {
+	case FASTPATH_NL_C_IPT_NOTIFY:
+		__learner_ct_block_all(m);
+		break;
+	default:
+		pr_err("unsupported command %u\n", cmd);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+static void fp_learner_release(struct kobject *kobj)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+	struct fp_learner *priv = module->priv;
+
+	priv->fp_rmmoding = 1;
+	nf_unregister_net_hooks(&init_net, nf_learner_hook_data, ARRAY_SIZE(nf_learner_hook_data));
+	unregister_inet6addr_notifier(&priv->inet6addr_notifier);
+	unregister_netevent_notifier(&priv->netevent_notifier);
+	learner_netdev_cleanup(&priv->netdev_notifier);
+	unregister_netdevice_notifier(&priv->netdev_notifier);
+	learner_nl_close(priv->nf_nl_sock);
+	learner_nl_close(priv->rt_nl_sock);
+
+	cancel_work_sync(&priv->update_work);
+	if (fp_learner_wq) {
+		flush_workqueue(priv->wq);
+		destroy_workqueue(priv->wq);
+	}
+
+	kfree(priv);
+	kfree(module);
+
+	pr_debug("fp_learner released\n");
+}
+
+static struct kobj_type ktype_learner = {
+	.sysfs_ops	= &fp_sysfs_ops,
+	.default_attrs	= fp_learner_attrs,
+	.release	= fp_learner_release,
+};
+
+static int fp_learner_probe(struct fastpath_module *module)
+{
+	struct fp_learner *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		pr_err("no memeory\n");
+		return -ENOMEM;
+	}
+
+	module->priv = priv;
+	snprintf(module->name, sizeof(module->name),"fp_learner");
+
+	spin_lock_init(&priv->lock);
+	INIT_LIST_HEAD(&priv->policy_list);
+	priv->lookups_retries = DEFAULT_LOOKUPS_RETRIES;
+	priv->fp_rmmoding = 0;
+
+	if (fp_learner_wq) {
+		INIT_LIST_HEAD(&priv->work_items_list);
+		priv->lookups_delay = DEFAULT_LOOKUPS_DELAY_MS;
+		priv->wq = create_singlethread_workqueue(module->name);
+		if (!priv->wq) {
+			pr_err("create workqueue failed\n");
+			ret = -EBUSY;
+			goto priv_kfree;
+		}
+	}
+
+	INIT_WORK(&priv->update_work, (void *)learner_ct_update_work);
+
+	rtnl_lock();
+	ret = learner_nl_open(priv, &priv->rt_nl_sock, NETLINK_ROUTE, RTNETLINK_GRP);
+	rtnl_unlock();
+	if (ret < 0) {
+		pr_err("learner_nl_open(NETLINK_ROUTE) failed (%d)\n", ret);
+		goto wq_destroy;
+	}
+
+	nfnl_lock(NFNL_SUBSYS_CTNETLINK);
+	ret = learner_nl_open(priv, &priv->nf_nl_sock, NETLINK_NETFILTER, NFNETLINK_GRP);
+	nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+	if (ret < 0) {
+		pr_err("learner_nl_open(NETLINK_NETFILTER) failed (%d)\n", ret);
+		goto nl_close_rt;
+	}
+
+	priv->netdev_notifier.notifier_call = learner_netdev_event;
+	ret = register_netdevice_notifier(&priv->netdev_notifier);
+	if (ret < 0) {
+		pr_err("register_netdevice_notifier failed (%d)\n", ret);
+		goto nl_close_nf;
+	}
+
+	priv->netevent_notifier.notifier_call = learner_netevent;
+	ret = register_netevent_notifier(&priv->netevent_notifier);
+	if (ret < 0) {
+		pr_err("register_netevent_notifier failed (%d)\n", ret);
+		goto netdev_notifier_unreg;
+	}
+
+	priv->inet6addr_notifier.notifier_call = fp_inet6addr_event;
+	ret = register_inet6addr_notifier(&priv->inet6addr_notifier);
+	if (ret < 0) {
+		pr_err("register_inet6addr_notifier failed (%d)\n", ret);
+		goto netdev_netevent_unreg;
+	}
+
+	ret = nf_register_net_hooks(&init_net, nf_learner_hook_data, ARRAY_SIZE(nf_learner_hook_data));
+	if (ret < 0) {
+		pr_err("nf_register_hooks failed (%d)\n", ret);
+		goto in6_notifier_err;
+	}
+
+	kobject_init(&module->kobj, &ktype_learner);
+	ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+	if (ret < 0) {
+		pr_err("kobject_add failed (%d)\n", ret);
+		goto nf_hooks_unreg;
+	}
+
+	kobject_uevent(&module->kobj, KOBJ_ADD);
+
+	pr_debug("fp_learner probed\n");
+	return 0;
+
+nf_hooks_unreg:
+	kobject_put(&module->kobj);
+	nf_unregister_net_hooks(&init_net, nf_learner_hook_data, ARRAY_SIZE(nf_learner_hook_data));
+in6_notifier_err:
+	unregister_inet6addr_notifier(&priv->inet6addr_notifier);
+netdev_netevent_unreg:
+	unregister_netevent_notifier(&priv->netevent_notifier);
+netdev_notifier_unreg:
+	learner_netdev_cleanup(&priv->netdev_notifier);
+	unregister_netdevice_notifier(&priv->netdev_notifier);
+nl_close_nf:
+	learner_nl_close(priv->nf_nl_sock);
+nl_close_rt:
+	learner_nl_close(priv->rt_nl_sock);
+wq_destroy:
+	if (fp_learner_wq) {
+		flush_workqueue(priv->wq);
+		destroy_workqueue(priv->wq);
+	}
+priv_kfree:
+	kfree(priv);
+
+	return ret;
+}
+
+static int fp_learner_remove(struct fastpath_module *module)
+{
+	kobject_put(&module->kobj);
+
+	pr_debug("fp_learner removed\n");
+	return 0;
+}
+
+struct fastpath_module_ops fp_learner_ops = {
+	.probe = fp_learner_probe,
+	.remove = fp_learner_remove,
+	.ioctl = fp_learner_ioctl,
+};
+
+module_param(fp_learner_wq, bool, S_IRUGO);
+MODULE_PARM_DESC(fp_learner_wq, "fastpath learner worqueue mode (default="
+				__MODULE_STRING(FP_LEARNER_WQ_DEFAULT) ")");
diff --git a/package/kernel/mfp/files/fp_ndisc.c b/package/kernel/mfp/files/fp_ndisc.c
new file mode 100644
index 0000000..e9e7dbc
--- /dev/null
+++ b/package/kernel/mfp/files/fp_ndisc.c
@@ -0,0 +1,257 @@
+/*
+ *	Fastpath Ndisc
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#include <net/ndisc.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+
+#include "fp_common.h"
+#include "fp_device.h"
+#include "fp_ndisc.h"
+
+#define IN6ADDR_LINKLOCAL_ADDR	\
+		{ { { 0xfe,0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+
+const struct in6_addr in6addr_linklocal_addr = IN6ADDR_LINKLOCAL_ADDR;
+
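+/*
+ * Step to the next ND option of the same type as @cur, or return NULL when
+ * no further option of that type exists before @end.
+ */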
+static struct nd_opt_hdr *fpnd_next_option(struct nd_opt_hdr *cur,
+					    struct nd_opt_hdr *end)
+{
+	int type;
+	if (!cur || !end || cur >= end)
+		return NULL;
+	type = cur->nd_opt_type;
+	do {
+		cur = ((void *)cur) + (cur->nd_opt_len << 3);
+	} while (cur < end && cur->nd_opt_type != type);
+	return cur <= end && cur->nd_opt_type == type ? cur : NULL;
+}
+
+static inline bool fpnd_icmp6_type_eq(struct sk_buff *skb, u32 type)
+{
+	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+	struct icmp6hdr *icmph = (struct icmp6hdr *)(iph + 1);
+
+	if (likely(iph->nexthdr != IPPROTO_ICMPV6 ||
+		   icmph->icmp6_code != 0 ||
+		   icmph->icmp6_type != type)) {
+
+		return false;
+	}
+
+	return true;
+}
+
+bool fpnd_is_ra(struct sk_buff *skb)
+{
+	return fpnd_icmp6_type_eq(skb, NDISC_ROUTER_ADVERTISEMENT);
+}
+
+bool fpnd_is_rs(struct sk_buff *skb)
+{
+	return fpnd_icmp6_type_eq(skb, NDISC_ROUTER_SOLICITATION);
+}
+
+static void fpnd_set_pref(struct fp_net_device *src, u8 *opt, int len)
+{
+	struct prefix_info *pinfo;
+	__u32 valid_lft;
+	__u32 prefered_lft;
+	int addr_type;
+
+	pinfo = (struct prefix_info *) opt;
+
+	if (len < sizeof(struct prefix_info)) {
+		pr_debug("prefix option too short\n");
+		return;
+	}
+
+	addr_type = ipv6_addr_type(&pinfo->prefix);
+
+	if (addr_type & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL))
+		return;
+
+	valid_lft = ntohl(pinfo->valid);
+	prefered_lft = ntohl(pinfo->prefered);
+
+	if (prefered_lft > valid_lft) {
+		pr_debug("prefix option has invalid lifetime\n");
+		return;
+	}
+
+	src->prefixlen = pinfo->prefix_len;
+	memcpy(&src->gb6addr, &pinfo->prefix, sizeof(struct in6_addr));
+	fpdev_set_gb6(src);
+
+	pr_debug("prefix for dev (%s) is (%pI6c) len (%d), sending to USB\n",
+		src->dev->name, &pinfo->prefix, pinfo->prefix_len);
+}
+
+void fpnd_process_ra(struct net_device *src, struct sk_buff *skb)
+{
+	struct fp_net_device *fpdev;
+	struct ndisc_options ndopts;
+	struct nd_opt_hdr *p;
+	int optlen;
+	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+	struct ra_msg *ra_msg = (struct ra_msg *)(iph + 1);
+
+	__u8 *opt = (__u8 *)(ra_msg + 1);
+
+	optlen = (skb->tail - (u8 *)ra_msg) - sizeof(struct ra_msg);
+
+	if (!(ipv6_addr_type(&iph->saddr) & IPV6_ADDR_LINKLOCAL)) {
+		pr_debug("source address is not link-local\n");
+		return;
+	}
+
+	if (optlen < 0) {
+		pr_debug("packet too short\n");
+		return;
+	}
+
+	if (!ndisc_parse_options(skb->dev, opt, optlen, &ndopts)) {
+		pr_debug("invalid ND options\n");
+		return;
+	}
+
+	fpdev = fpdev_get_if(src);
+	if (unlikely(!fpdev))
+		return;
+
+	if (ndopts.nd_opts_mtu) {
+		struct mtu_option *mtuinfo = (struct mtu_option *) ndopts.nd_opts_mtu;
+
+		fpdev->mtu = ntohl(mtuinfo->mtu);
+		fpdev_set_mtu(fpdev);
+	}
+
+	if (ndopts.nd_opts_pi) {
+		for (p = ndopts.nd_opts_pi;
+		     p;
+		     p = fpnd_next_option(p, ndopts.nd_opts_pi_end)) {
+			fpnd_set_pref(fpdev, (u8 *)p, (p->nd_opt_len) << 3);
+		}
+	}
+
+	fpdev_put(fpdev);
+}
+
+static void fp_ip6_nd_hdr(struct sk_buff *skb,
+			  const struct in6_addr *saddr,
+			  const struct in6_addr *daddr,
+			  int hop_limit, int len)
+{
+	struct ipv6hdr *hdr;
+
+	skb_push(skb, sizeof(*hdr));
+	skb_reset_network_header(skb);
+	hdr = ipv6_hdr(skb);
+
+	ip6_flow_hdr(hdr, 0, 0);
+
+	hdr->payload_len = htons(len);
+	hdr->nexthdr = IPPROTO_ICMPV6;
+	hdr->hop_limit = hop_limit;
+
+	hdr->saddr = *saddr;
+	hdr->daddr = *daddr;
+}
+
+static struct sk_buff *fp_ndisc_alloc_skb(struct net_device *dev,
+				       int len)
+{
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
+	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
+	if (!skb) {
+		printk("ndisc: %s failed to allocate an skb\n", __func__);
+		return NULL;
+	}
+
+	skb->protocol = htons(ETH_P_IPV6);
+	skb->dev = dev;
+
+	skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
+	skb_reset_transport_header(skb);
+
+	/* Manually assign socket ownership as we avoid calling
+	 * sock_alloc_send_pskb() to bypass wmem buffer limits
+	 */
+	skb_set_owner_w(skb, sk);
+
+	return skb;
+}
+
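+/*
+ * Answer a router solicitation locally: build a router advertisement from
+ * the prefix/MTU cached by fpnd_process_ra() for the ccinet device,
+ * checksum it and feed it back through netif_rx(). Returns 1 when the RS
+ * was consumed, 0 to let the stack handle it.
+ */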
+int fpnd_process_rs(struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+	struct icmp6hdr *icmp6h;
+	struct ra_msg *msg;
+	struct prefix_info *prefix;
+	struct mtu_option *mtu;
+	struct fp_net_device *fpdev;
+	int err;
+
+	fpdev = fpdev_get_ccinet();
+	if (unlikely(!fpdev))
+		return 0;
+
+	nskb = fp_ndisc_alloc_skb(fpdev->dev, sizeof(*msg) + sizeof(struct prefix_info) + sizeof(struct mtu_option));
+	if (!nskb) {
+		fpdev_put(fpdev);
+		return 0;
+	}
+
+	msg = (struct ra_msg *)skb_put(nskb, sizeof(*msg));
+	*msg = (struct ra_msg) {
+		.icmph = {
+			.icmp6_type = NDISC_ROUTER_ADVERTISEMENT,
+			.icmp6_hop_limit = 0xFF,
+			.icmp6_rt_lifetime = htons(0xFFFF),
+		},
+	};
+
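+	/* Prefix Information option (RFC 4861, section 4.6.2); a lifetime
+	 * of 0xffffffff denotes infinity.
+	 */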
+	prefix = (struct prefix_info *)skb_put(nskb, sizeof(struct prefix_info));
+	prefix->type = ND_OPT_PREFIX_INFO;
+	prefix->length = 4;
+	prefix->prefix_len = fpdev->prefixlen;
+	prefix->autoconf = 1;
+	prefix->valid = htonl(0xFFFFFFFF);
+	prefix->prefered = htonl(0xFFFFFFFF);
+	memcpy(&prefix->prefix, &fpdev->gb6addr, sizeof(struct in6_addr));
+
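+	/* MTU option (RFC 4861, section 4.6.4), one 8-byte unit long */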
+	mtu = (struct mtu_option *)skb_put(nskb, sizeof(struct mtu_option));
+	mtu->type = ND_OPT_MTU;
+	mtu->length = 1;
+	mtu->mtu = htonl(fpdev->mtu);
+
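+	/* Checksum the ICMPv6 message with its pseudo-header before the
+	 * IPv6 header is pushed; nskb->len still covers only the ICMPv6
+	 * part at this point.
+	 */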
+	icmp6h = icmp6_hdr(nskb);
+	icmp6h->icmp6_cksum = csum_ipv6_magic(&in6addr_linklocal_addr,
+					      &fpdev->ll6addr, nskb->len,
+					      IPPROTO_ICMPV6,
+					      csum_partial(icmp6h, nskb->len, 0));
+
+	fp_ip6_nd_hdr(nskb, &in6addr_linklocal_addr, &fpdev->ll6addr, 0xFF, nskb->len);
+	err = netif_rx(nskb);
+	fpdev_put(fpdev);
+
+	if (!err) {
+		dev_kfree_skb_any(skb);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/package/kernel/mfp/files/fp_ndisc.h b/package/kernel/mfp/files/fp_ndisc.h
new file mode 100644
index 0000000..2ccbeb5
--- /dev/null
+++ b/package/kernel/mfp/files/fp_ndisc.h
@@ -0,0 +1,25 @@
+#ifndef __FP_NDISC_H__
+#define __FP_NDISC_H__
+
+
+/* Recursive DNS Server option, cf. RFC 4339 and RFC 6106 */
+struct dns_svr_option {
+	__u8 type;
+	__u8 length;
+	__be16 reserved;
+	__be32 lifetime;
+	__be32 dns1[4];
+	__be32 dns2[4];
+} __packed;
+
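+/* MTU option, RFC 4861, section 4.6.4 */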
+struct mtu_option {
+	__u8 type;
+	__u8 length;
+	__be16 reserved;
+	__be32 mtu;
+} __packed;
+
+bool fpnd_is_ra(struct sk_buff *skb);
+bool fpnd_is_rs(struct sk_buff *skb);
+void fpnd_process_ra(struct net_device *src, struct sk_buff *skb);
+int fpnd_process_rs(struct sk_buff *skb);
+#endif
diff --git a/package/kernel/mfp/files/fp_netlink.c b/package/kernel/mfp/files/fp_netlink.c
new file mode 100644
index 0000000..3237881
--- /dev/null
+++ b/package/kernel/mfp/files/fp_netlink.c
@@ -0,0 +1,114 @@
+/*
+ *	Fastpath Netlink Interface
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "mfp" " netlink:%s:%d: " fmt, __func__, __LINE__
+
+#include "fp_common.h"
+#include "fp_core.h"
+#include "fp_netlink.h"
+
+struct nla_policy fp_netlink_policy[FASTPATH_NL_A_MAX + 1] __maybe_unused = {
+	[FASTPATH_NL_A_MSG] = { .type = NLA_NUL_STRING },
+};
+
+/* Optional: Add rule parsing to avoid blocking all entries */
+static int fp_netlink_ipt_notify(struct sk_buff *skb, struct genl_info *info)
+{
+	struct fastpath_module *m = fp_module_get_by_name("fp_learner");
+	int ret = -EINVAL;
+
+	if (!m) {
+		pr_err("failed to get fp_learner module\n");
+		return -EINVAL;
+	}
+
+	if (m->ops && m->ops->ioctl)
+		ret = m->ops->ioctl(m, FASTPATH_NL_C_IPT_NOTIFY, NULL);
+
+	fp_module_put(m);
+	return ret;
+}
+
+static struct genl_ops fp_netlink_genl_ops[] = {
+	FASTPATH_NL_OP(FASTPATH_NL_C_IPT_NOTIFY, fp_netlink_ipt_notify),
+};
+
+static struct genl_family fp_netlink_family = {
+	.hdrsize = 0,
+	.name = "FASTPATH",
+	.version = 1,
+	.maxattr = FASTPATH_NL_A_MAX,
+	.ops     = fp_netlink_genl_ops,
+	.n_ops   = ARRAY_SIZE(fp_netlink_genl_ops),
+};
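+
+/*
+ * Userspace raises FASTPATH_NL_C_IPT_NOTIFY over the "FASTPATH" generic
+ * netlink family whenever the iptables ruleset changes, e.g. (sketch,
+ * assuming libnl-genl-3):
+ *
+ *	struct nl_sock *sk = nl_socket_alloc();
+ *	genl_connect(sk);
+ *	int fam = genl_ctrl_resolve(sk, "FASTPATH");
+ *	genl_send_simple(sk, fam, FASTPATH_NL_C_IPT_NOTIFY, 1, 0);
+ */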
+
+static struct attribute *fp_netlink_attrs[] = {
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+static void fp_netlink_release(struct kobject *kobj)
+{
+	struct fastpath_module *module = to_fpmod(kobj);
+
+	genl_unregister_family(&fp_netlink_family);
+
+	pr_debug("fp_netlink released\n");
+	kfree(module);
+}
+
+static struct kobj_type ktype_netlink = {
+	.sysfs_ops	= &fp_sysfs_ops,
+	.default_attrs	= fp_netlink_attrs,
+	.release	= fp_netlink_release,
+};
+
+static int fp_netlink_probe(struct fastpath_module *module)
+{
+	int ret;
+
+	module->priv = NULL;
+	snprintf(module->name, sizeof(module->name), "fp_netlink");
+
+	ret = genl_register_family(&fp_netlink_family);
+	if (ret < 0) {
+		pr_err("genl_register_family failed (%d)\n", ret);
+		return ret;
+	}
+
+	kobject_init(&module->kobj, &ktype_netlink);
+	ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+	if (ret < 0) {
+		pr_err("kobject_add failed (%d)\n", ret);
+		goto err_register_ops;
+	}
+
+	kobject_uevent(&module->kobj, KOBJ_ADD);
+
+	pr_debug("fp_netlink probed\n");
+	return 0;
+
+err_register_ops:
+	/* the kobject release handler unregisters the genl family and
+	 * frees module, so don't unregister again here
+	 */
+	kobject_put(&module->kobj);
+
+	return ret;
+}
+
+static int fp_netlink_remove(struct fastpath_module *module)
+{
+	kobject_put(&module->kobj);
+
+	pr_debug("fp_netlink removed\n");
+	return 0;
+}
+
+struct fastpath_module_ops fp_netlink_ops = {
+	.probe = fp_netlink_probe,
+	.remove = fp_netlink_remove,
+};
diff --git a/package/kernel/mfp/files/fp_netlink.h b/package/kernel/mfp/files/fp_netlink.h
new file mode 100644
index 0000000..92ea11e
--- /dev/null
+++ b/package/kernel/mfp/files/fp_netlink.h
@@ -0,0 +1,36 @@
+#ifndef FP_NETLINK_H
+#define FP_NETLINK_H
+
+enum {
+	FASTPATH_NL_A_UNSPEC,
+	FASTPATH_NL_A_MSG,
+	__FASTPATH_NL_A_MAX,
+};
+#define FASTPATH_NL_A_MAX (__FASTPATH_NL_A_MAX - 1)
+
+extern struct nla_policy fp_netlink_policy[];
+
+/* commands */
+enum {
+	FASTPATH_NL_C_UNSPEC,
+	FASTPATH_NL_C_IPT_NOTIFY,
+	__FASTPATH_NL_C_MAX,
+};
+#define FASTPATH_NL_C_MAX (__FASTPATH_NL_C_MAX - 1)
+
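+/*
+ * Initializer for a struct genl_ops entry; GENL_ADMIN_PERM restricts
+ * the command to callers with CAP_NET_ADMIN.
+ */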
+#define FASTPATH_NL_OP(_cmd, _func)	\
+{					\
+	.cmd = _cmd,			\
+	.doit = _func,			\
+	.dumpit = NULL,			\
+	.flags  = GENL_ADMIN_PERM,	\
+}
+
+#define FASTPATH_NL_DUMP(_cmd, _func, _dump)	\
+{						\
+	.cmd = _cmd,				\
+	.doit = _func,				\
+	.dumpit = _dump,			\
+}
+
+#endif /* FP_NETLINK_H */
diff --git a/package/kernel/mfp/files/mfp.a b/package/kernel/mfp/files/mfp.a
new file mode 100644
index 0000000..ff862f7
--- /dev/null
+++ b/package/kernel/mfp/files/mfp.a
Binary files differ
diff --git a/package/kernel/mfp/files/version.txt b/package/kernel/mfp/files/version.txt
new file mode 100644
index 0000000..02223ca
--- /dev/null
+++ b/package/kernel/mfp/files/version.txt
@@ -0,0 +1 @@
+1a99141fab9339480bff05b94dea46af709b1892