ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/package/kernel/mfp/files/fp_classifier.c b/package/kernel/mfp/files/fp_classifier.c
new file mode 100644
index 0000000..268e47a
--- /dev/null
+++ b/package/kernel/mfp/files/fp_classifier.c
@@ -0,0 +1,902 @@
+/*
+ * Fast path Classifier
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "mfp" " classifier:%s:%d: " fmt, __func__, __LINE__
+
+#include "fp_common.h"
+#include "fp_database.h"
+#include "fp_device.h"
+#include "fp_core.h"
+
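+/* Classification counters, exposed through the "stats" sysfs attribute */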
+struct fpc_stats {
+ u32 total;
+ u32 slow;
+ u32 fast;
+};
+
+static struct fpc_stats stats;
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0)
+static unsigned int udp_ct_timeout = UDP_DEFAULT_TIMEOUT;
+static unsigned int udp_ct_timeout_stream = UDP_DEFAULT_TIMEOUT_STREAM;
+static unsigned int tcp_ct_timeout = TCP_DEFAULT_TIMEOUT;
+#endif
+static int fp_acct_flag = 1;
+static int fp_ip_log_en = 255;
+static int fp_ip_log_pkt_num;
+static unsigned char *fp_ip_log_buf;
+static int fp_ip_log_index;
+#define ONE_IP_LOG_LEN 96
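+/*
+ * Each log record is ONE_IP_LOG_LEN (96) bytes, filled by log_ip_pkt():
+ * seconds(8) nanoseconds(4) version(1) proto(1) id(2) length(2, +2 pad)
+ * saddr(16) daddr(16) sport(2) dport(2) seq(4) ack_seq(4) offset/flags(2);
+ * the remaining bytes are unused.
+ */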
+
+//#define CONFIG_SET_HL_64
+
+static inline int fp_ip_decrease_ttl(struct sk_buff *skb)
+{
+ if (ip_hdr(skb)->version == 4)
+ return ip_decrease_ttl(ip_hdr(skb));
+ else
+ return --ipv6_hdr(skb)->hop_limit;
+}
+
+/* Build a conntrack tuple from the skb's network and transport headers */
+static inline void build_tuple(const struct sk_buff *skb,
+ struct nf_conntrack_tuple *tuple)
+{
+ int proto;
+ struct udphdr *udph;
+ /* Fill l3 info */
+ if (ip_hdr(skb)->version == 4) {
+ tuple->src.l3num = AF_INET;
+ tuple->src.u3.ip = ip_hdr(skb)->saddr;
+ tuple->dst.u3.ip = ip_hdr(skb)->daddr;
+ proto = ip_hdr(skb)->protocol;
+ } else {
+ tuple->src.l3num = AF_INET6;
+ tuple->src.u3.in6 = ipv6_hdr(skb)->saddr;
+ tuple->dst.u3.in6 = ipv6_hdr(skb)->daddr;
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+
+	/*
+	 * Fill l4 info: TCP and UDP keep the source/dest ports at the same
+	 * offsets, so a udphdr view of the transport header works for both.
+	 */
+ udph = (struct udphdr *)skb_transport_header(skb);
+ tuple->dst.protonum = proto;
+ tuple->dst.u.all = udph->dest;
+ tuple->src.u.all = udph->source;
+ tuple->dst.dir = 0;
+}
+
+static inline void log_ip_pkt(const struct sk_buff *skb, unsigned char *buf)
+{
+ struct tcphdr *tcph;
+ struct iphdr *piphdr;
+ struct ipv6hdr *pipv6hdr;
+ struct timespec64 ts;
+ piphdr = ip_hdr(skb);
+
+ ktime_get_real_ts64(&ts);
+ memcpy(buf, &ts.tv_sec, 8);
+ buf += 8;
+ memcpy(buf, &ts.tv_nsec, 4);
+ buf += 4;
+ /* Fill l3 info */
+ if (piphdr->version == 4) {
+ *buf = 4;
+ buf += 1;
+ *buf = piphdr->protocol;
+ buf += 1;
+ memcpy(buf, &piphdr->id, 2);
+ buf += 2;
+ memcpy(buf, &piphdr->tot_len, 2);
+ buf += 4;
+ memcpy(buf, &piphdr->saddr, 4);
+ buf += 16;
+ memcpy(buf, &piphdr->daddr, 4);
+ buf += 16;
+ } else {
+ pipv6hdr = ipv6_hdr(skb);
+ *buf = 6;
+ buf += 1;
+ *buf = pipv6hdr->nexthdr;
+ buf += 1;
+ *buf = 0;
+ *(buf+1) = 0;
+ buf += 2;
+ memcpy(buf, &pipv6hdr->payload_len, 2);
+ buf += 4;
+ memcpy(buf, &pipv6hdr->saddr, 16);
+ buf += 16;
+ memcpy(buf, &pipv6hdr->daddr, 16);
+ buf += 16;
+ }
+
+ /* Fill l4 info*/
+ tcph = (struct tcphdr *)skb_transport_header(skb);
+
+ memcpy(buf, &tcph->source, 2);
+ buf += 2;
+ memcpy(buf, &tcph->dest, 2);
+ buf += 2;
+
+ memcpy(buf, &tcph->seq, 4);
+ buf += 4;
+
+ memcpy(buf, &tcph->ack_seq, 4);
+ buf += 4;
+ memcpy(buf, ((char *)&tcph->ack_seq)+4, 2);
+ buf += 2;
+}
+
+/* checksum adjust (inline) */
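+/*
+ * Incrementally adjust a 16-bit one's-complement checksum when 'olen' old
+ * bytes at 'optr' are replaced by 'nlen' new bytes at 'nptr' (the same idea
+ * as the incremental update described in RFC 1624).  Lengths must be even.
+ * A UDP checksum of zero means "no checksum" and is left untouched.
+ */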
+static inline void fpc_checksum(unsigned char *chksum,
+ unsigned char *optr, unsigned long olen,
+ unsigned char *nptr, unsigned long nlen,
+ int proto)
+{
+ long x, old, neu;
+
+ if (proto == IPPROTO_UDP && *(__sum16 *)chksum == 0)
+ return;
+
+ x = chksum[0] * 256 + chksum[1];
+ x = ~x & 0xFFFF;
+ while (olen) {
+ old = optr[0] * 256 + optr[1];
+ optr += 2;
+ x -= old & 0xffff;
+ if (x <= 0) {
+ x--;
+ x &= 0xffff;
+ }
+ olen -= 2;
+ }
+
+ while (nlen) {
+ neu = nptr[0] * 256 + nptr[1];
+ nptr += 2;
+ x += neu & 0xffff;
+ if (x & 0x10000) {
+ x++;
+ x &= 0xffff;
+ }
+ nlen -= 2;
+ }
+ x = ~x & 0xFFFF;
+ chksum[0] = (unsigned char)(x / 256);
+ chksum[1] = (unsigned char)(x & 0xff);
+}
+
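+/*
+ * Prepend the cached link-layer header (the hh_cache snapshot stored in the
+ * fpdb entry) to the skb, much like neigh_hh_output() does.  Returns nonzero
+ * if the skb does not have enough headroom.
+ */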
+static inline int fp_hard_header(struct sk_buff *skb, struct fpdb_entry *e)
+{
+ struct hh_cache *hh = &e->hh;
+ int hh_len = hh->hh_len;
+ unsigned int hh_alen = 0;
+ unsigned int headroom;
+
+ if (!hh_len)
+ return 0;
+
+ headroom = skb_headroom(skb);
+ if (likely(hh_len <= HH_DATA_MOD)) {
+ hh_alen = HH_DATA_MOD;
+
+ /* this is inlined by gcc */
+ if (likely(headroom >= HH_DATA_MOD))
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+ HH_DATA_MOD);
+ } else {
+ hh_alen = HH_DATA_ALIGN(hh_len);
+
+ if (likely(headroom >= hh_alen))
+ memcpy(skb->data - hh_alen, hh->hh_data,
+ hh_alen);
+ }
+
+ if (WARN_ON_ONCE(headroom < hh_alen))
+ return 1;
+
+ skb_push(skb, hh_len);
+
+ return 0;
+}
+
+/**
+ * Refresh the conntrack entry (reschedule its timeout). Fast-path packets
+ * bypass the netfilter hooks, so the connection timeout and, optionally,
+ * its accounting are updated here instead.
+ *
+ * @param skb packet being forwarded
+ * @param el fpdb_entry for this connection
+ * @param acct do accounting
+ */
+static inline void fpc_refresh(struct sk_buff *skb, struct fpdb_entry *el, int acct)
+{
+ struct nf_conn *ct = el->ct;
+ const struct nf_conntrack_l4proto *l4proto;
+ enum ip_conntrack_info ctinfo = el->dir ? IP_CT_IS_REPLY : 0;
+ unsigned long extra_jiffies = 0;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
+ unsigned int *timeouts;
+#endif
+
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+ NF_CT_ASSERT(l4proto);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
+ if (l4proto->l4proto == IPPROTO_TCP) {
+ timeouts = nf_tcp_pernet(nf_ct_net(ct))->timeouts;
+ WARN_ON(ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED);
+ extra_jiffies = timeouts[TCP_CONNTRACK_ESTABLISHED];
+ } else if (l4proto->l4proto == IPPROTO_UDP) {
+ timeouts = nf_udp_pernet(nf_ct_net(ct))->timeouts;
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+ extra_jiffies = timeouts[UDP_CT_REPLIED];
+ else
+ extra_jiffies = timeouts[UDP_CT_UNREPLIED];
+ }
+#else
+ if (l4proto->l4proto == IPPROTO_TCP) {
+ WARN_ON(ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED);
+ extra_jiffies = tcp_ct_timeout;
+ } else if (l4proto->l4proto == IPPROTO_UDP) {
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+ extra_jiffies = udp_ct_timeout_stream;
+ else
+ extra_jiffies = udp_ct_timeout;
+ }
+#endif
+ __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, acct);
+
+ fpdb_trace(el, (l4proto->l4proto == IPPROTO_TCP) ? tcp_hdr(skb) : NULL);
+}
+
+/**
+ * Modify skb as if it was forwarded by the ip stack:
+ * L2: Add MAC Header, set skb->pkt_type = PACKET_HOST
+ * L3: Decrement ttl, NAT, checksum
+ * L4: Checksum
+ *
+ * @param skb skb to modify
+ * @param el fpdb_entry related to this connection
+ */
+static inline int fpc_modify(struct sk_buff *skb,
+ struct fpdb_entry *el)
+{
+ int version = ip_hdr(skb)->version;
+ int proto = (version == 4) ? ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+ struct udphdr *udph = udp_hdr(skb);
+ struct tcphdr *tcph = tcp_hdr(skb);
+
+ /**
+ * skb->pkt_type can be either PACKET_HOST or PACKET_OTHERHOST
+ * (see fpc_classify_start). We also know that this flow passed
+ * through slowpath (otherwise fastpath connection would not
+ * have been created in the first place). Therefore it is safe
+ * to change the pkt_type since this is what the IP Stack would
+ * have done.
+ *
+ * Slowpath behavior:
+ * PACKET_OTHERHOST is set by the receiving interface if the
+	 * dest MAC is different from its own MAC address; this means
+	 * that the packet is not destined to us and is
+ * dropped. The only exception is if the receiving interface is
+ * behind a bridge. In this case, the dest MAC in packets sent
+ * outside the LAN is the bridge MAC address, in which case the
+ * bridging code sets the pkt_type to PACKET_HOST before
+	 * routing the packet. Packets within the LAN are bridged and
+	 * not passed to the upper layers, and therefore do not go
+ * through fastpath unless CONFIG_BRIDGE_NETFILTER is enabled -
+ * which is the only case where fastpath "misbehaves" and sets
+ * the pkt_type to PACKET_HOST for bridged packets - this might
+ * need revision in the future.
+ */
+ skb->pkt_type = PACKET_HOST;
+
+ if (fp_hard_header(skb, el))
+ return 1;
+
+ fp_ip_decrease_ttl(skb);
+
+	/* NAT (in case it is used by this connection) */
+ if (NF_CT_NAT(el->ct)) {
+ void *old, *new;
+ unsigned int size;
+ __sum16 *check;
+
+ /* NAT L3 ip addresses manipulation */
+ if (likely(version == 4)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->saddr = el->out_tuple.dst.u3.ip;
+ iph->daddr = el->out_tuple.src.u3.ip;
+ #ifdef CONFIG_SET_HL_64
+ iph->ttl = 64;
+ #endif
+ ip_send_check(iph); /*IPv4 checksum */
+ } else {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ iph->saddr = el->out_tuple.dst.u3.in6;
+ iph->daddr = el->out_tuple.src.u3.in6;
+ #ifdef CONFIG_SET_HL_64
+ iph->hop_limit = 64;
+ #endif
+ }
+
+		/*
+		 * Adjust the transport checksum for the address rewrite: both
+		 * TCP and UDP checksums cover a pseudo-header that includes
+		 * the IP addresses.
+		 */
+ check = (proto == IPPROTO_UDP) ? &udph->check : &tcph->check;
+ size = (version == 4) ? 4 : 16;
+ old = &el->in_tuple.src.u3.in6;
+ new = &el->out_tuple.dst.u3.in6;
+ fpc_checksum((u8 *)check, old, size, new, size, proto);
+ old = &el->in_tuple.dst.u3.in6;
+ new = &el->out_tuple.src.u3.in6;
+ fpc_checksum((u8 *)check, old, size, new, size, proto);
+
+
+ /* NAT L4 ports manipulation */
+ size = sizeof(__be16);
+ old = &el->in_tuple.dst.u.all;
+ new = &el->out_tuple.src.u.all;
+ if (*(__be16 *)old != *(__be16 *)new) {
+ udph->dest = *(__be16 *)new;
+ fpc_checksum((u8 *)check, old, size, new, size, proto);
+ }
+ old = &el->in_tuple.src.u.all;
+ new = &el->out_tuple.dst.u.all;
+ if (*(__be16 *)old != *(__be16 *)new) {
+ udph->source = *(__be16 *)new;
+ fpc_checksum((u8 *)check, old, size, new, size, proto);
+ }
+ }
+
+ return 0;
+}
+
+static inline bool ipv4_is_fragmented(struct iphdr *iph)
+{
+ __be16 df = iph->frag_off & htons(IP_DF);
+ return (iph->frag_off && !df);
+}
+
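+/*
+ * Decide whether this packet is even a candidate for fast path: plain
+ * IPv4/IPv6 TCP or UDP, no IPv4 options or IPv6 extension headers, not
+ * fragmented or reassembled, TTL/hop_limit > 1, and no TCP FIN/RST.
+ * Returns 1 for a candidate, 0 to punt to slow path.  Also sets the
+ * transport header offset as a side effect.
+ */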
+static inline int parse_headers(struct sk_buff *skb)
+{
+ int ihl, proto;
+
+ BUG_ON(!skb);
+ skb_reset_network_header(skb);
+
+ /* L3 Protocol parsing */
+ if (likely(ip_hdr(skb)->version == 4)) {
+ ihl = ip_hdr(skb)->ihl * 4;
+ proto = ip_hdr(skb)->protocol;
+
+ /*ipv4 sanity checks*/
+ if (unlikely(ihl > sizeof(struct iphdr))) {
+ pr_debug("ipv4 options in header\n");
+ return 0;
+ }
+ /* check ttl */
+ if (unlikely(ip_hdr(skb)->ttl == 1)) {
+ pr_debug("ip->ttl==1\n");
+ return 0;
+ }
+		/* check fragmentation */
+ if (unlikely(ipv4_is_fragmented(ip_hdr(skb)))) {
+ pr_debug("fragmented packet (frag_offs=%x)\n",
+ ntohs(ip_hdr(skb)->frag_off));
+ return 0;
+ }
+ /* ipv4 reassembled pkts */
+ if (unlikely(skb->data_len)) {
+ pr_debug("ipv4 reassembled pkts --> send to slowpath\n");
+ return 0;
+ }
+ } else if (likely(ip_hdr(skb)->version == 6)) {
+		ihl = sizeof(struct ipv6hdr); /* without extensions */
+ proto = ipv6_hdr(skb)->nexthdr;
+
+ /* ipv6 sanity checks */
+ if (unlikely(ipv6_hdr(skb)->hop_limit == 1)) {
+			pr_debug("ipv6 hop_limit==1 --> send to slowpath\n");
+ return 0;
+ }
+
+ /* ipv6 reassembled pkts */
+ if (unlikely(skb->data_len)) {
+ pr_debug("ipv6 reassembled pkts --> send to slowpath\n");
+ return 0;
+ }
+ } else {
+ /* Not an IP packet (neither ipv4 nor ipv6) */
+ pr_debug("not an IP packet\n");
+ return 0;
+ }
+
+ /* L4 Protocol parsing */
+ skb_set_transport_header(skb, ihl);
+
+ if (proto == IPPROTO_TCP) {
+ struct tcphdr *th = tcp_hdr(skb);
+
+ if (tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_FIN)) {
+ pr_debug("tcp rst or fin\n");
+ return 0;
+ }
+ } else if (proto != IPPROTO_UDP) {
+ pr_debug("not a TCP or UDP packet\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+#define NETIF_INVALID(x) (!(x) || !netif_device_present(x) || \
+ !netif_running(x) || !netif_carrier_ok(x))
+
+/**
+ * Finish classification for this database entry.
+ * If skb is not NULL, it is tracked & mangled.
+ *
+ * @param skb skb to mangle & track, or NULL if not desired
+ * @param el fpdb_entry previously acquired by fpc_classify_start
+ */
+int fpc_classify_finish(struct sk_buff *skb, struct fpdb_entry *el)
+{
+ int ret = 0;
+
+ if (skb) {
+ fpc_refresh(skb, el, fp_acct_flag);
+ if (fpc_modify(skb, el)) {
+ ret = 1;
+ goto exit;
+ }
+
+ /* update timestamp if fpdb used */
+ el->tstamp = jiffies;
+ if (!el->tstamp)
+ el->tstamp = 1;
+ }
+exit:
+ fpdb_put(el);
+ return ret;
+}
+
+/**
+ * Classifies an skb as fast or slow, without changing the skb.
+ * Caller MUST call fpc_classify_finish to free the database entry.
+ *
+ * @param skb skb to classify
+ * @param tuple caller-provided tuple, filled from the skb's headers
+ *
+ * @return fpdb_entry for this skb, or NULL if it is classified as slow path
+ */
+struct fpdb_entry *fpc_classify_start(struct sk_buff *skb, struct nf_conntrack_tuple *tuple)
+{
+ struct fpdb_entry *el = NULL;
+ struct net_device *src, *dst;
+ int tmp_log_pkt_index;
+ unsigned char *plog_pos;
+
+ BUG_ON(!skb);
+ BUG_ON(!skb->dev); /* eth_type_trans always sets skb->dev - we count on it here */
+
+ src = skb->dev;
+ stats.total++;
+
+ if (unlikely(skb_headroom(skb) < ETH_HLEN)) {
+ pr_debug("No room for MAC header in skb\n");
+ goto slowpath;
+ }
+
+ /* source device sanity checks */
+ if (unlikely(NETIF_INVALID(src))) {
+ pr_debug("src (%s) state invalid (%lu)\n", src->name, src->state);
+ goto slowpath;
+ }
+
+ memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+ if (unlikely(!parse_headers(skb)))
+ goto slowpath;
+
+ /* Check fp_database for match */
+ build_tuple(skb, tuple);
+	if (fp_ip_log_en == 1) {
+ tmp_log_pkt_index = fp_ip_log_index++;
+ if (fp_ip_log_index > fp_ip_log_pkt_num - 50)
+ fp_ip_log_index = 0;
+
+ plog_pos = fp_ip_log_buf + tmp_log_pkt_index*ONE_IP_LOG_LEN;
+ log_ip_pkt(skb, plog_pos);
+ }
+ el = fpdb_get(tuple);
+ if (unlikely(!el))
+ goto slowpath;
+
+ if (unlikely(el->block)) {
+ pr_debug("entry blocked, send to slowpath\n");
+ goto slowpath;
+ }
+
+ if (unlikely(nf_ct_protonum(el->ct) == IPPROTO_TCP) &&
+ el->ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) {
+ pr_debug("tcp connection state not established\n");
+ goto slowpath;
+ }
+
+ if (unlikely(el->in_dev->dev != src &&
+ el->in_dev->br != src)) {
+ /**
+		 * Since the entry can be updated (due to route changes), this
+		 * case is legal for a short period of time in which packets
+		 * are received using the old entry and transmitted using the
+		 * new one. Since we don't know whether this is the case, we
+		 * just forward these packets to slowpath to decide what to do.
+ */
+ pr_debug("in_dev->dev=%s(%p) != src=%s(%p)\n",
+ el->in_dev->dev->name, el->in_dev->dev, src->name, src);
+ goto slowpath;
+ }
+
+ if (unlikely(!el->in_dev->forward || !el->out_dev->forward)) {
+ pr_debug("forwarding disabled (%s forward=%d, %s forward=%d)\n",
+ el->in_dev->dev->name, el->in_dev->forward,
+ el->out_dev->dev->name, el->out_dev->forward);
+ goto slowpath;
+ }
+
+ dst = el->out_dev->dev;
+ if (unlikely(NETIF_INVALID(dst))) {
+ pr_debug("dst (%s) state invalid (%lu)\n", dst->name, dst->state);
+ goto slowpath;
+ }
+
+ if (unlikely(dst->mtu < skb->len)) {
+ pr_info_once("mtu (%d) < len (%d)\n", dst->mtu, skb->len);
+ goto slowpath;
+ }
+
+ if (unlikely(dst == src)) {
+ /* src == dst entries should be blocked, it's a bug otherwise */
+		/* Don't dump the entry here: it takes a lot of time and would
+		 * trigger an assert (yhuang 20160622).
+		 */
+ pr_err("Bug in classifier dst_dev==src_dev(%s), block=%d\n",
+ src->name, (unsigned int)el->block);
+ /* FP_ERR_DUMP_ENTRY(NULL, el); */
+ /* BUG_ON(debug_level & DBG_WARN_AS_ERR); */
+ goto slowpath;
+
+ }
+
+ if (unlikely(dst->header_ops && !el->hh.hh_len)) {
+ pr_debug("hh_cache not valid, send to slowpath\n");
+ goto slowpath;
+ }
+
+ if (unlikely(skb->pkt_type != PACKET_HOST &&
+ skb->pkt_type != PACKET_OTHERHOST)) {
+ pr_debug("invalid skb->pkt_type(%d)\n", skb->pkt_type);
+ goto slowpath;
+ }
+
+ pr_debug("Packet from %s to %s (pkt_p %p len %d) classified as fast path\n",
+ src->name, dst->name, skb->data, skb->len);
+ stats.fast++;
+ return el;
+
+slowpath:
+ if (el)
+ fpdb_put(el);
+ pr_debug("Packet from %s (pkt_p %p len %d) classified as slow path\n",
+ src->name, skb->data, skb->len);
+ stats.slow++;
+ return NULL;
+
+}
+
+
+/**
+ * classify, mangle, track and hold the output device
+ * Caller MUST release the device with fp_dev_put() once finished.
+ *
+ * @param skb skb to classify and mangle
+ *
+ * @return destination fp_net_device, or NULL if the packet is classified
+ *         as slow path or cannot be mangled
+ */
+struct fp_net_device *fpc_classify(struct sk_buff *skb)
+{
+ struct fpdb_entry *el;
+ struct fp_net_device *fdev;
+ struct nf_conntrack_tuple tuple;
+
+ el = fpc_classify_start(skb, &tuple);
+ if (unlikely(!el))
+ return NULL;
+ fdev = fpdev_hold(el->out_dev);
+	if (fpc_classify_finish(skb, el)) {
+		/* mangling failed: drop the device hold we just took */
+		fp_dev_put(fdev);
+		return NULL;
+	}
+
+ return fdev;
+}
+
+static ssize_t stats_show(struct fastpath_module *m, char *buf)
+{
+ int len;
+
+ len = sprintf(buf, "Fast Path Classifier statistics:\n");
+
+ len += sprintf(buf + len, "Total Classified %d ", stats.total);
+ len += sprintf(buf + len, "(Fast %d, Slow %d)\n", stats.fast, stats.slow);
+
+ return len;
+}
+
+static ssize_t stats_clear(struct fastpath_module *m, const char *buf,
+ size_t count)
+{
+ pr_debug("reset stats...\n");
+ memset(&stats, 0, sizeof(stats));
+ return count;
+}
+
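+/*
+ * On old kernels (<= 3.3) the conntrack timeouts used by fpc_refresh() are
+ * configured via these sysfs attributes; values are written in seconds and
+ * stored in jiffies.
+ */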
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0)
+static ssize_t udp_ct_timeout_set(struct fastpath_module *m, const char *buf,
+ size_t count)
+{
+	unsigned int sec;
+
+	if (sscanf(buf, "%u", &sec) != 1)
+		return -EINVAL;
+	udp_ct_timeout = sec * HZ;
+ return count;
+}
+
+static ssize_t udp_ct_timeout_get(struct fastpath_module *m, char *buf)
+{
+ unsigned int sec = udp_ct_timeout / HZ;
+ return sprintf(buf, "%u\n", sec);
+}
+
+static ssize_t tcp_ct_timeout_set(struct fastpath_module *m, const char *buf,
+ size_t count)
+{
+	unsigned int sec;
+
+	if (sscanf(buf, "%u", &sec) != 1)
+		return -EINVAL;
+	tcp_ct_timeout = sec * HZ;
+ return count;
+}
+
+static ssize_t tcp_ct_timeout_get(struct fastpath_module *m, char *buf)
+{
+ unsigned int sec = tcp_ct_timeout / HZ;
+ return sprintf(buf, "%u\n", sec);
+}
+
+
+static FP_ATTR(udp_ct_timeout, S_IRUGO|S_IWUSR, udp_ct_timeout_get, udp_ct_timeout_set);
+static FP_ATTR(tcp_ct_timeout, S_IRUGO|S_IWUSR, tcp_ct_timeout_get, tcp_ct_timeout_set);
+#endif
+
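+/*
+ * fp_acct_flag is passed to fpc_refresh() as the 'acct' argument, i.e. it
+ * controls whether conntrack accounting is updated for fast-path packets.
+ */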
+static ssize_t fp_acct_set(struct fastpath_module *m, const char *buf,
+ size_t count)
+{
+	int flag;
+
+	if (sscanf(buf, "%d", &flag) != 1)
+		return -EINVAL;
+	fp_acct_flag = flag;
+ return count;
+}
+
+static ssize_t fp_acct_get(struct fastpath_module *m, char *buf)
+{
+ int flag = fp_acct_flag;
+ return sprintf(buf, "%d\n", flag);
+}
+
+
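+/*
+ * Control interface for the raw IP logging buffer. Illustrative usage (the
+ * exact sysfs path depends on where the fastpath kobject is registered):
+ *   echo 1,<pkts> > .../fp_classifier/fp_ip_log   allocate buffer, start logging
+ *   echo 2 > .../fp_classifier/fp_ip_log          dump buffer to /tmp/iplog.bin
+ *   echo 3 > .../fp_classifier/fp_ip_log          free the buffer
+ *   echo 0 > .../fp_classifier/fp_ip_log          stop logging
+ */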
+static ssize_t fp_ip_log_set(struct fastpath_module *m, const char *buf,
+ size_t count)
+{
+ int flag;
+ int old_flag;
+ int num;
+ int ret;
+ struct file *filep;
+ mm_segment_t old_fs;
+
+	if (sscanf(buf, "%d", &flag) != 1)
+		return -EINVAL;
+ switch (flag) {
+ case 0:
+ fp_ip_log_en = flag;
+ pr_err("fp_ip_log_set: disable ip_log:fp_ip_log_index=%d to 0\n",
+ fp_ip_log_index);
+ fp_ip_log_index = 0;
+ break;
+ case 1:
+ fp_ip_log_index = 0;
+		if (sscanf(buf, "%d,%d", &flag, &num) != 2 || num <= 0)
+			return -EINVAL;
+
+ if (fp_ip_log_buf == NULL) {
+ fp_ip_log_buf = kzalloc(ONE_IP_LOG_LEN*num, GFP_KERNEL);
+ if (fp_ip_log_buf == NULL)
+ pr_err("fp_ip_log_set: %d,%d,%d, but malloc failed\n",
+ flag, num, fp_ip_log_index);
+ else
+				pr_err("fp_ip_log_set: %d,%d,%d, buf=%p, size=%d\n",
+					flag, num, fp_ip_log_index,
+					fp_ip_log_buf,
+					num*ONE_IP_LOG_LEN);
+ } else {
+
+ pr_err(" fp_ip_log_set: buffer has been allocated:%d\n",
+ fp_ip_log_pkt_num);
+ }
+ fp_ip_log_pkt_num = num;
+ fp_ip_log_en = flag;
+ break;
+
+ case 2:
+ old_flag = fp_ip_log_en;
+		pr_err("fp_ip_log_set: output buf to file (/tmp/iplog.bin): old_flag=%d index=%d\n",
+			old_flag, fp_ip_log_index);
+ fp_ip_log_en = 2;
+/*Don't delete this part of code. It's for reference on data structure
+ {
+ char* pex_log_pos;
+ unsigned int* ptime_h;
+ unsigned int* ptime_l;
+ unsigned short* pver;
+ unsigned short* ppro;
+ unsigned short* plen;
+ unsigned int* psadd;
+ unsigned int* pdadd;
+ unsigned short* psport;
+ unsigned short* pdport;
+ unsigned int* pseq;
+ unsigned int* pack_seq;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ pex_log_pos = fp_ip_log_buf+i*ONE_IP_LOG_LEN;
+ ptime_h = (unsigned int*)pex_log_pos;
+ pex_log_pos +=4;
+ ptime_l = (unsigned int*)pex_log_pos;
+ pex_log_pos +=4;
+ pver = (unsigned short*)pex_log_pos;
+ pex_log_pos +=2;
+ ppro = (unsigned short*)pex_log_pos;
+ pex_log_pos +=2;
+ plen = (unsigned short*)pex_log_pos;
+ pex_log_pos +=4;
+ psadd = (unsigned int*)pex_log_pos;
+ pex_log_pos += 16;
+ pdadd = (unsigned int*) pex_log_pos;
+ pex_log_pos+=16;
+ psport = (unsigned short*) pex_log_pos;
+ pex_log_pos +=2;
+ pdport = (unsigned short*) pex_log_pos;
+ pex_log_pos+=2;
+ pseq = (unsigned int*)pex_log_pos;
+ pex_log_pos +=4;
+ pack_seq =(unsigned int*)pex_log_pos;
+
+ pr_err("Time:%x %x, ver*pro:%x, pid:%x, len:%x,
+ sadd:%x, dadd:%x, sport:%x, dport:%x,
+ seq;%x, ack_seq:%x\n",
+ *ptime_h, *ptime_l, *pver, *ppro, *plen,
+ *psadd, *pdadd, *psport, *pdport,
+ *pseq, *pack_seq);
+ }
+ }
+*/
+		if (fp_ip_log_buf == NULL) {
+			pr_err("fp_ip_log_set: no log buffer allocated\n");
+			fp_ip_log_en = old_flag;
+			break;
+		}
+		filep = filp_open("/tmp/iplog.bin", O_RDWR|O_CREAT, 0644);
+		if (IS_ERR(filep)) {
+			pr_err("fp_ip_log_set: fail to open IP log file\n");
+		} else {
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			filep->f_pos = 0;
+			ret = filep->f_op->write(filep, fp_ip_log_buf,
+						 ONE_IP_LOG_LEN*fp_ip_log_pkt_num,
+						 &filep->f_pos);
+			set_fs(old_fs);
+			pr_err("fp_ip_log_set: write to /tmp/iplog.bin, ret=%d\n",
+				ret);
+			/* only close a file that was actually opened */
+			filp_close(filep, NULL);
+		}
+ fp_ip_log_en = old_flag;
+ break;
+ case 3:
+ fp_ip_log_en = flag;
+ if (fp_ip_log_buf != NULL) {
+ kfree(fp_ip_log_buf);
+ pr_err("fp_ip_log_set: free the buffer\n");
+ fp_ip_log_buf = NULL;
+ } else {
+ pr_err("fp_ip_log_set: buffer is NULL\n");
+ }
+ break;
+ default:
+ fp_ip_log_en = flag;
+		pr_err("fp_ip_log_set: unsupported command %d, but the log will stop\n",
+			flag);
+ break;
+ }
+ return count;
+}
+
+static ssize_t fp_ip_log_get(struct fastpath_module *m, char *buf)
+{
+ int flag = fp_ip_log_en;
+ int num = fp_ip_log_pkt_num;
+	return sprintf(buf, "%d,%d buf=%p\n",
+		flag,
+		num,
+		fp_ip_log_buf);
+}
+
+
+static FP_ATTR(fp_acct_flag, S_IRUGO|S_IWUSR, fp_acct_get, fp_acct_set);
+static FP_ATTR(fp_ip_log, S_IRUGO|S_IWUSR, fp_ip_log_get, fp_ip_log_set);
+static FP_ATTR(stats, S_IRUGO|S_IWUSR, stats_show, stats_clear);
+
+static struct attribute *fp_classifier_attrs[] = {
+ &fp_attr_stats.attr,
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0)
+ &fp_attr_udp_ct_timeout.attr,
+ &fp_attr_tcp_ct_timeout.attr,
+#endif
+ &fp_attr_fp_acct_flag.attr,
+ &fp_attr_fp_ip_log.attr,
+ NULL, /* need to NULL terminate the list of attributes */
+};
+
+static void fp_classifier_release(struct kobject *kobj)
+{
+ struct fastpath_module *module = to_fpmod(kobj);
+
+ pr_debug("fp_classifier released\n");
+ kfree(module);
+}
+
+static struct kobj_type ktype_classifier = {
+ .sysfs_ops = &fp_sysfs_ops,
+ .default_attrs = fp_classifier_attrs,
+ .release = fp_classifier_release,
+};
+
+static int fp_classifier_probe(struct fastpath_module *module)
+{
+ int ret;
+
+ module->priv = NULL;
+ snprintf(module->name, sizeof(module->name), "fp_classifier");
+
+ kobject_init(&module->kobj, &ktype_classifier);
+ ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
+ if (ret < 0) {
+ pr_err("kobject_add failed (%d)\n", ret);
+ kobject_put(&module->kobj);
+ return ret;
+ }
+ kobject_uevent(&module->kobj, KOBJ_ADD);
+
+ pr_debug("fp_classifier probed\n");
+ return 0;
+}
+
+static int fp_classifier_remove(struct fastpath_module *module)
+{
+ kobject_put(&module->kobj);
+
+ pr_debug("fp_classifier removed\n");
+ return 0;
+}
+
+struct fastpath_module_ops fp_classifier_ops = {
+ .probe = fp_classifier_probe,
+ .remove = fp_classifier_remove,
+};
+