[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.05_CAP.15.05(SDK4.4)diff_15.11
Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No
Change-Id: Ief12bb341bd859dd73c7a8c5fa3d25e5ba7e1c6d
diff --git a/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.c b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.c
new file mode 100755
index 0000000..bea60e1
--- /dev/null
+++ b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.c
@@ -0,0 +1,1269 @@
+/*******************************************************************************
+ * Include header files *
+ ******************************************************************************/
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <net/sock.h>
+#include <uapi/linux/sched/types.h>
+#include "zvnet_dev.h"
+#include "ram_config.h"
+#include <net/netfilter/nf_conntrack.h>
+
+/*******************************************************************************
+ * Macro definitions *
+ ******************************************************************************/
+#define USE_ZVNET_PACKET
+
+#define WATCHDOG_TIMEO (5*HZ)
+#define XMIT_RETRANS_TIMES 3
+#define ZVNET_SKB_PAD 128
+#define ZVNET_TMP_BUFF_LEN 2048
+#define ZVNET_FREE_BUFF_NUM 256
+#define ZVNET_XMIT_BUFF_NUM 64
+#define ZVNET_XMIT_MAX_QUEUE_NUM 2048
+
+/*******************************************************************************
+ * Type definitions *
+ ******************************************************************************/
+/*
+ * Packet descriptor handed from AP to CAP; the contents are written into
+ * shared DDR and consumed by the other core.
+ */
+struct T_zvnet_rpmsg
+{
+	void *buff;//skb head pointer, echoed back on free so the owning core can release it quickly
+	void *head;//lowest physical address the peer may touch; crossing it corrupts memory
+	unsigned short data_off;//offset from head to the start of the MAC frame (physical address space)
+	unsigned short len;//valid payload length, normally the MAC frame length
+	unsigned short end_off;//end offset
+	unsigned char dev;//cid 1->8  /* NOTE(review): skb_build_apbuf() indexes zvnet_dev[dev] 0-based - confirm the "1->8" label */
+	unsigned char flag;//0 normal packet, 1 re-forwarded packet, 2 fast-path re-forwarded packet
+};
+/* Per-direction packet/byte counters used for conntrack stat sync. */
+struct T_zvnet_pkt_stats
+{
+	unsigned int pkt;
+	unsigned int len;
+};
+/*
+ * Conntrack state/statistics record handed from AP to CAP through shared DDR.
+ * flag != 0 means "update stats", flag == 0 means "drop the ct reference".
+ */
+struct T_zvnet_rpmsg_ctstat
+{
+	void *cap_nfct;
+	unsigned char in;
+	unsigned char out;
+	unsigned short flag;
+	struct T_zvnet_pkt_stats pkt[2];
+};
+/*******************************************************************************
+ * Local variable definitions *
+ ******************************************************************************/
+struct zvnet_device zvnet_dev[DDR_ZVNET_DEV_MAX];
+int *vir_addr_ap = NULL;
+struct sk_buff_head g_zvnet_skb_queue;
+struct zvnet_channel g_zvnet_chn_info;
+
+#ifdef USE_ZVNET_PACKET
+void *g_zvnet_free_buff[ZVNET_FREE_BUFF_NUM];
+int g_zvnet_free_num;
+spinlock_t g_zvnet_free_lock;
+struct semaphore g_zvnet_free_sem;
+struct semaphore g_zvnet_xmit_sem;
+struct sk_buff_head g_zvnet_skb_xmit_queue;
+
+unsigned int g_wrap_packet_size = 1000;
+module_param(g_wrap_packet_size, int, 0644);
+unsigned int g_wrap_num = 10;
+module_param(g_wrap_num, int, 0644);
+unsigned int g_wrap_timeout = 10;
+module_param(g_wrap_timeout, int, 0644);
+#endif
+
+/*******************************************************************************
+ * Global variable definitions *
+ ******************************************************************************/
+extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
+extern void v7_dma_map_area(const void *, size_t, int);
+extern void *get_ct_for_ap(struct sk_buff *skb);
+extern void put_ct_for_ap(void *ct);
+extern spinlock_t fast_fw_spinlock;
+/*******************************************************************************
+ * Local function declarations *
+ ******************************************************************************/
+static int zvnet_open(struct net_device *net);
+static int zvnet_close(struct net_device *net);
+static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net);
+static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue);
+static struct net_device_stats *zvnet_get_stats(struct net_device *net);
+static void v2xnet_init_netdev(struct net_device *net);
+static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb);
+static void zvnet_bh (unsigned long param);
+static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev);
+
+static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len);
+static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len);
+static int zvnet_channel_clear(struct zvnet_channel *chninfo);
+static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp);
+static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo);
+static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev);
+static int zvnet_receive_thread(void *argv);
+static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size);
+static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size);
+static int zvnet_channel_create(struct zvnet_device *zvnetdev);
+
+/*******************************************************************************
+ * Local function implementations *
+ ******************************************************************************/
+
+/*
+ * Translate a kernel virtual address inside the AP shared-DDR window to its
+ * physical address.  Returns 0 when the address is outside the window
+ * (0 can never be a valid translation since DDR_BASE_ADDR_AP is the base).
+ */
+unsigned long virt_to_phys_ap(unsigned long virt)
+{
+	if(virt >= (unsigned long)vir_addr_ap && virt <= ((unsigned long)vir_addr_ap+DDR_BASE_LEN_AP))
+		return DDR_BASE_ADDR_AP + (virt - (unsigned long)vir_addr_ap);
+	return 0;	/* BUGFIX: was NULL - pointer constant in an integer-returning function */
+}
+
+/*
+ * Translate a physical address inside the AP shared-DDR window back to the
+ * kernel virtual mapping.  Returns 0 when the address is outside the window.
+ */
+unsigned long phys_to_virt_ap(unsigned long phys)
+{
+	if(phys >= DDR_BASE_ADDR_AP && phys <= (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP))
+		return (unsigned long)vir_addr_ap + (phys - DDR_BASE_ADDR_AP);
+	return 0;	/* BUGFIX: was NULL - pointer constant in an integer-returning function */
+}
+
+/*
+ * Debug-only sanity check: panics when an skb's capHead flag disagrees with
+ * whether its data actually lives in the AP shared-DDR window, or when the
+ * skb is already sitting on g_zvnet_skb_queue (double free).
+ * NOTE(review): walks g_zvnet_skb_queue without taking its lock - only safe
+ * if callers serialize against queue mutation; confirm call sites.
+ */
+void check_skb_test(struct sk_buff *skb)
+{
+	if(skb && vir_addr_ap){
+		struct sk_buff *tmp_skb;
+		/* capHead set <=> head must translate into the AP window, and vice versa */
+		if((skb->capHead && (virt_to_phys_ap(skb->head) == NULL))
+			|| ((skb->capHead == NULL) && virt_to_phys_ap(skb->head))){
+			dump_stack();
+			msleep(1000);	/* give the console time to drain before panicking */
+			panic("capHead err");
+		}
+		skb_queue_walk(&g_zvnet_skb_queue, tmp_skb) {
+			if(tmp_skb == skb){
+				dump_stack();
+				msleep(1000);
+				panic("dup free");
+			}
+		}
+	}
+}
+
+/* Map a net_device back to its slot in zvnet_dev[]; -1 when it is not ours. */
+int zvnet_get_index_by_netdev(struct net_device *net)
+{
+	int idx = 0;
+
+	while (idx < DDR_ZVNET_DEV_MAX) {
+		if (zvnet_dev[idx].net == net)
+			return idx;
+		idx++;
+	}
+	return -1;
+}
+#ifdef USE_ZVNET_PACKET
+/*
+ * Flush the batched list of CAP-owned buffer pointers to the CAP core over
+ * rpmsg channel 29 so CAP can release them.  Snapshot-and-clear is done
+ * under the lock; the (potentially blocking) rpmsg write is done outside it.
+ */
+void write_free_apbuf_packet(void)
+{
+	int ret,size;
+	unsigned long flags;
+	void *buf[ZVNET_FREE_BUFF_NUM];
+
+	spin_lock_irqsave(&g_zvnet_free_lock, flags);
+	if(g_zvnet_free_num == 0){
+		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+		return;
+	}
+	/* size in bytes; was "<< 2" which hard-coded 4-byte pointers */
+	size = g_zvnet_free_num * sizeof(void *);
+	memcpy(buf, g_zvnet_free_buff, size);
+	g_zvnet_free_num = 0;
+	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+	{
+		T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+							.chID = 29,
+							.flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
+		if((size / sizeof(void *)) > ZVNET_FREE_BUFF_NUM)
+			panic("free packet err");
+		msg.buf = buf;
+		msg.len = size;
+		zv_info("tofree size=%d", size);
+		ret = zvnetWrite(&msg);
+		if(ret < 0)
+			panic("err, ret:%d!!!!!!", ret);
+	}
+}
+#endif
+//½«CAP²à´«µÝÀ´µÄcapbufÖ¸ÕëдÈëDDR£¬ÒÔICP·½Ê½Í¨ÖªCAP²à
+/*
+ * Queue one CAP-owned buffer pointer for return to the CAP core.  Pointers
+ * are batched in g_zvnet_free_buff; a full batch is flushed immediately over
+ * rpmsg channel 29, otherwise the free-wrap thread is nudged once the batch
+ * exceeds g_wrap_num.  Entries are pointer-sized; the "<< 2" / ">> 2" math
+ * assumes 4-byte pointers (true on this 32-bit ARMv7 target).
+ */
+void write_free_apbuf(void *head)
+{
+#ifdef USE_ZVNET_PACKET
+	unsigned long flags;
+
+	zv_info("g_zvnet_free_num=%d skb=0x%x", g_zvnet_free_num, head);
+	spin_lock_irqsave(&g_zvnet_free_lock, flags);
+	g_zvnet_free_buff[g_zvnet_free_num] = head;
+	g_zvnet_free_num++;
+	if(g_zvnet_free_num == ZVNET_FREE_BUFF_NUM){
+		/* Batch is full: snapshot it, drop the lock, then send synchronously. */
+		int size;
+		void *buf[ZVNET_FREE_BUFF_NUM];
+
+		size = (g_zvnet_free_num << 2);
+		memcpy(buf, g_zvnet_free_buff, size);
+		g_zvnet_free_num = 0;
+		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+		if((size >> 2) > ZVNET_FREE_BUFF_NUM)
+			panic("free packet err");
+		{
+			int ret;
+			T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+								.chID = 29,
+								.flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
+			msg.buf = &buf;
+			msg.len = size;
+			if(printk_ratelimit())
+				zv_warn("tofree quick size=%d", size);
+			ret = zvnetWrite(&msg);
+			if(ret < 0)
+				panic("err, ret:%d!!!!!!", ret);
+		}
+		return;
+	}
+	/* Partial batch: wake the flush thread early once it is "big enough". */
+	if(g_zvnet_free_num > g_wrap_num)
+		up(&g_zvnet_free_sem);
+	if(g_zvnet_free_num > ZVNET_FREE_BUFF_NUM)
+		panic("free_buff err");
+	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
+#else
+	/* Unbatched variant: send the single pointer immediately. */
+	int ret = 0;
+	long buf = (long)head;
+
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 29,
+						.flag = RPMSG_WRITE_INT,
+						.buf = NULL,
+						.len = 4 };
+	msg.buf = &buf;
+	zv_info("tofree 0x%x", head);
+	ret = zvnetWrite(&msg);
+	if(ret < 0)
+		panic("err, ret:%d!!!!!!", ret);
+#endif
+}
+
+/*
+ * Build an sk_buff around a CAP-provided shared-DDR buffer described by
+ * *pbuf_temp (physical addresses).  On any allocation failure the CAP
+ * buffer is returned to its owner via write_free_apbuf().
+ * Returns the new skb, or NULL on failure.
+ */
+struct sk_buff *skb_build_apbuf(struct T_zvnet_rpmsg *pbuf_temp)
+{
+	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+	zv_info("build 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
+	/* head must lie inside the AP shared-DDR window */
+	if((unsigned long )pbuf_temp->head < DDR_BASE_ADDR_AP || (unsigned long )pbuf_temp->head > (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)){
+		zv_err("err 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
+		panic("addr is not APBUF mem!!!");
+		return NULL;
+	}
+	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (!skb)
+	{
+		write_free_apbuf(pbuf_temp->buff);
+		zv_err("alloc fail");
+		return NULL;
+	}
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+
+	/* NOTE(review): "dev < 0" is always false - dev is unsigned char */
+	if(pbuf_temp->dev < 0 || pbuf_temp->dev >= DDR_ZVNET_DEV_MAX)
+		panic("dev index error!!!");
+	skb->head = phys_to_virt_ap((unsigned long )pbuf_temp->head);
+	skb->data = skb->head + pbuf_temp->data_off;
+	skb->capHead = pbuf_temp->buff;	/* remember the CAP owner cookie for later free */
+	/* cache-line alignment not yet accounted for; tail room ignored for now */
+	skb->truesize = SKB_TRUESIZE(skb->data - skb->head + pbuf_temp->len);
+
+	refcount_set(&skb->users, 1);
+	skb_reset_tail_pointer(skb);
+	skb->tail += pbuf_temp->len;
+	skb->len = pbuf_temp->len;
+	skb->end = skb->head + pbuf_temp->end_off;
+	skb->mac_header = (typeof(skb->mac_header))~0U;
+	skb->transport_header = (typeof(skb->transport_header))~0U;
+	skb->dev = zvnet_dev[pbuf_temp->dev].net;
+	if(skb->len == 0 || skb->len > 2000)
+		panic("len ERR!!!!!!!!!!\n");
+	/* invalidate cache for the region CAP wrote before the CPU reads it */
+	v7_dma_map_area(skb->head, sizeof(struct skb_shared_info) + pbuf_temp->end_off, DMA_FROM_DEVICE);
+	if(IFF_NOARP & skb->dev->flags)
+		memcpy(skb->data, skb->dev->dev_addr, 6);	/* stamp our MAC as destination on NOARP devices */
+	atomic_set(&skb_shinfo(skb)->dataref, 1);
+	/* make sure we initialize shinfo sequentially */
+	skb_reset_network_header(skb);
+	skb_set_kcov_handle(skb, kcov_common_handle());
+	return skb;
+}
+
+/* Legacy MTU setter kept as the .ndo_change_mtu hook; applies the value verbatim. */
+int eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	netdev_warn(dev, "%s is deprecated!\n", __func__);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+/*
+ * Debug helper: hex-dump an skb payload, ten bytes per log line, with the
+ * remainder (< 10 bytes) accumulated into one final line.
+ */
+static void skb_debug_test(struct sk_buff *skb)
+{
+	int i;
+	int vcount = skb->len/10;	/* number of full 10-byte rows */
+	int rcount = skb->len%10;	/* leftover bytes in the last row */
+	char tmp[64] = {0};
+	char strbuf[64] = {0};
+
+	zv_info("\n");
+	for(i=0; i<vcount; i++) {
+		zv_info("%d---0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x\n",i,skb->data[0+10*i],skb->data[1+10*i],skb->data[2+10*i],
+			skb->data[3+10*i],skb->data[4+10*i],skb->data[5+10*i],skb->data[6+10*i],skb->data[7+10*i],skb->data[8+10*i],skb->data[9+10*i]);
+	}
+	if(vcount > 0) {
+		memset(tmp,0,sizeof(tmp));
+		memset(strbuf,0,sizeof(strbuf));
+		sprintf(strbuf,"%d---",vcount);
+
+		/* at most 9 leftover bytes -> "0xff," x 9 fits in strbuf[64] */
+		for(i=0; i<rcount; i++)
+		{
+			sprintf(tmp,"0x%x,",skb->data[10*vcount + i]);
+			strcat(strbuf,tmp);
+		}
+		zv_info("%s ",strbuf);
+	}
+	zv_info("\n");
+}
+
+/* .ndo_open: start the TX queue; refuse a second open of a running interface. */
+static int zvnet_open(struct net_device *net)
+{
+	struct zvnet *dev = netdev_priv(net);
+
+	if (!(net->flags & IFF_UP)) {
+		netif_start_queue(net);
+		return 0;
+	}
+
+	zv_dbg("%s has been opened!", dev->net->name);
+	return -EBUSY;
+}
+
+/* .ndo_stop: halt TX, then make sure the deferred RX tasklet is fully idle. */
+static int zvnet_close(struct net_device *net)
+{
+	struct zvnet *priv = netdev_priv(net);
+
+	zv_info("%s", priv->net->name);
+	netif_stop_queue(net);
+	tasklet_kill(&priv->bh);
+	return 0;
+}
+#ifdef USE_ZVNET_PACKET
+/*
+ * Drain g_zvnet_skb_xmit_queue into descriptor records and push them to the
+ * CAP core over rpmsg channel 20 in bursts of ZVNET_XMIT_BUFF_NUM.
+ * CAP-owned skbs (capHead set) are described by their CAP cookie and freed
+ * here; locally owned skbs are parked on g_zvnet_skb_queue until the CAP
+ * side echoes them back through the release thread.
+ */
+static void zvnet_xmit_packet(void)
+{
+	int i,j,k,ret,num;
+	unsigned long flags;
+	unsigned long flags1;
+	struct sk_buff *skb, *tmp;
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 20,
+						.flag = RPMSG_WRITE_INT};
+	/* static: too large for the kernel stack */
+	static struct T_zvnet_rpmsg buff[ZVNET_XMIT_MAX_QUEUE_NUM+1];
+
+	spin_lock_irqsave(&g_zvnet_skb_xmit_queue.lock, flags);
+	if (skb_queue_empty(&g_zvnet_skb_xmit_queue)) {
+		spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
+		return;
+	}
+	i = 0;
+	/* _safe walk: capHead entries are unlinked and freed inside the loop */
+	skb_queue_walk_safe(&g_zvnet_skb_xmit_queue, skb, tmp) {
+		//buff[i].buff = skb;
+		buff[i].data_off = skb->data - skb->head;
+		//buff[i].head = virt_to_phys(skb->head);
+		buff[i].len = skb->len;
+		buff[i].end_off = skb->end - skb->head;
+		buff[i].dev = zvnet_get_index_by_netdev(skb->dev);
+		if(skb->capHead){
+			buff[i].buff = skb->capHead;
+#ifdef CONFIG_FASTNAT_MODULE
+			if(skb->isFastnat){
+				buff[i].head = get_ct_for_ap(skb);	/* pass a ct reference for fast-path accounting */
+				buff[i].flag = 2;
+			}else
+#endif
+			{
+				buff[i].head = NULL;
+				buff[i].flag = 1;
+			}
+			__skb_unlink(skb, &g_zvnet_skb_xmit_queue);
+			kfree_skb(skb);
+		}else{
+			buff[i].buff = skb;
+			buff[i].head = virt_to_phys(skb->head);
+			buff[i].flag = 0;
+		}
+		i++;
+		zv_info("xmit skb=0x%x i=%d", skb, i);
+		if(i > ZVNET_XMIT_MAX_QUEUE_NUM){
+			panic("qlen:%d!", i);
+			break;	/* NOTE(review): unreachable after panic() */
+		}
+	}
+	/* Remaining (locally owned) skbs move to the in-flight queue atomically. */
+	spin_lock_irqsave(&g_zvnet_skb_queue.lock, flags1);
+	skb_queue_splice_tail_init(&g_zvnet_skb_xmit_queue, &g_zvnet_skb_queue);
+	spin_unlock_irqrestore(&g_zvnet_skb_queue.lock, flags1);
+	spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
+	zv_info("g_zvnet_skb_queue.qlen=%d i=%d", g_zvnet_skb_queue.qlen, i);
+	for(j = 0; j < i; j = j + ZVNET_XMIT_BUFF_NUM){
+		if(i <= (j + ZVNET_XMIT_BUFF_NUM)){
+			msg.buf = (void *)&buff[j];
+			msg.len = sizeof(struct T_zvnet_rpmsg)*(i-j);/* final burst drains the rest */
+			ret = zvnetWrite(&msg);
+		}else{
+			msg.buf = (void *)&buff[j];
+			msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM;
+			ret = zvnetWrite(&msg);
+		}
+		zv_info("xmit write ret=%d size=%d i=%d j=%d", ret, msg.len, i, j);
+		if(ret < 0) {
+			/* Burst failed: roll back ownership of everything in the burst. */
+			if(printk_ratelimit())
+				zv_warn("zvnet_channel_write ret=%d fail.",ret);
+			num = msg.len / sizeof(struct T_zvnet_rpmsg);
+			for(k = j; k < j+num; k++){
+				if(buff[k].flag == 0){
+					skb = (struct sk_buff *)buff[k].buff;
+					skb_unlink(skb, &g_zvnet_skb_queue);
+					skb->isToap = 0;
+					kfree_skb(skb);
+				}else{
+					if(buff[k].head)
+						put_ct_for_ap(buff[k].head);	/* drop the ct reference taken above */
+					write_free_apbuf(buff[k].buff);
+				}
+			}
+		}
+	}
+}
+#endif
+/*
+ * .ndo_start_xmit: hand the packet to the CAP core.  In USE_ZVNET_PACKET
+ * mode the skb is staged on g_zvnet_skb_xmit_queue and flushed in batches by
+ * the xmit-wrap thread; otherwise a descriptor is written synchronously.
+ * The peer consumes the skb data area directly, so anything cloned,
+ * fragmented, or short on headroom is first linearized into a private skb.
+ * Always returns NET_XMIT_SUCCESS (drops are counted, not reported).
+ */
+static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net)
+{
+#ifdef USE_ZVNET_PACKET
+	struct sk_buff *data = NULL;
+	unsigned int data_len;
+
+	/* Queue full: drop rather than back-pressure the stack. */
+	if(g_zvnet_skb_xmit_queue.qlen >= ZVNET_XMIT_MAX_QUEUE_NUM){
+		net->stats.tx_errors++;
+		net->stats.tx_dropped++;
+		zv_err("write err, qlen:%d!", g_zvnet_skb_xmit_queue.qlen);
+		kfree_skb(skb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	if(unlikely(skb_headroom(skb) < NET_SKB_PAD || skb->next//|| skb->capHead
+		|| skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags)
+		|| (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
+		int ret_len = skb->len;
+
+		data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
+		if (unlikely(!data)) {
+			zv_err("dev_alloc_skb fail,len %d",ret_len);
+			net->stats.tx_errors++;
+			net->stats.tx_dropped++;
+			kfree_skb(skb);
+			return NET_XMIT_SUCCESS;
+		}
+		/* reserve before put (idiomatic order; resulting layout unchanged) */
+		skb_reserve(data, NET_IP_ALIGN);
+		skb_put(data,ret_len);
+		memcpy(data->data, skb->data, ret_len);
+		zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
+		kfree_skb(skb);
+	}else{
+		data = skb;
+	}
+	data->dev = net;
+	data->isToap = 1;
+	/*
+	 * BUGFIX: snapshot the length before queueing.  Once the skb is on the
+	 * xmit queue the flush thread may transmit and free it, and the original
+	 * code additionally read skb->len after kfree_skb(skb) in the copy path.
+	 */
+	data_len = data->len;
+	v7_dma_map_area(data->head, data->end - data->head + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
+	skb_queue_tail(&g_zvnet_skb_xmit_queue, data);
+	/* Small packets or a deep queue wake the batching thread immediately. */
+	if(data_len < g_wrap_packet_size || g_zvnet_skb_xmit_queue.qlen > g_wrap_num)
+		up(&g_zvnet_xmit_sem);
+	net->stats.tx_packets++;
+	net->stats.tx_bytes += data_len;
+#else
+	struct zvnet *dev = netdev_priv(net);
+	struct zvnet_device *zvnetdev = (struct zvnet_device *)dev->dev_priv;
+	int ret = 0;
+	struct zvp_header hzvp;
+
+	if (!skb) {
+		zv_err("err: skb == 0!");
+	}
+#if 0
+	if (skb->len > ZVNET_TMP_BUFF_LEN) {
+		zv_err("err: skb->len(%d)>%d!", skb->len, ZVNET_TMP_BUFF_LEN);
+	}
+
+send_header:
+	ret = zvnet_channel_write(&(zvnetdev->chn_info), skb->data, skb->len);
+
+	if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
+		zvnetdev->retran_times ++;
+		zv_warn("The retran_times is %d.",zvnetdev->retran_times);
+		goto send_header;
+	}
+
+	if (ret >= 0) {
+		net->stats.tx_packets++;
+		net->stats.tx_bytes += skb->len;
+	} else {
+		net->stats.tx_errors++;
+		net->stats.tx_dropped++;
+		zv_err("write err, ret:%d!", ret);
+	}
+
+exit:
+	kfree_skb(skb);
+#else
+	struct T_zvnet_rpmsg buff = {0};
+	struct sk_buff *data = NULL;
+	if(unlikely(skb_headroom(skb) < NET_SKB_PAD || skb->capHead || skb->next
+		|| skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags)
+		|| (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
+		int ret_len = skb->len;
+
+		data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
+		if (unlikely(!data)) {
+			zv_err("dev_alloc_skb fail,len %d",ret_len);
+			net->stats.tx_errors++;
+			net->stats.tx_dropped++;
+			kfree_skb(skb);
+			return NET_XMIT_SUCCESS;
+		}
+		skb_reserve(data, NET_IP_ALIGN);
+		skb_put(data,ret_len);
+		memcpy(data->data, skb->data, ret_len);
+		data->isToap = 1;
+		buff.buff = data;
+		buff.data_off = data->data - data->head;
+		buff.head = virt_to_phys(data->head);
+		buff.len = ret_len;
+		buff.end_off = data->end - data->head;
+		buff.dev = zvnet_get_index_by_netdev(net);
+		zv_info("alloc 0x%x 0x%x %d %d", buff.buff, buff.head, buff.data_off, buff.len);
+		zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
+		v7_dma_map_area(data->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
+	}else{
+		skb->isToap = 1;
+		buff.buff = skb;
+		buff.data_off = skb->data - skb->head;
+		buff.head = virt_to_phys(skb->head);
+		buff.len = skb->len;
+		buff.end_off = skb->end - skb->head;
+		buff.dev = zvnet_get_index_by_netdev(net);
+		zv_info("transfer 0x%x %d 0x%x %d", buff.buff, buff.head, buff.data_off, buff.len);
+		v7_dma_map_area(skb->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
+	}
+send_header:
+	ret = zvnet_channel_write(&g_zvnet_chn_info, &buff, sizeof(struct T_zvnet_rpmsg));
+
+	if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
+		zvnetdev->retran_times ++;
+		zv_warn("The retran_times is %d.",zvnetdev->retran_times);
+		goto send_header;
+	}
+
+	if (ret >= 0) {
+		net->stats.tx_packets++;
+		net->stats.tx_bytes += skb->len;
+		if(data){
+			kfree_skb(skb);
+			skb_queue_tail(&g_zvnet_skb_queue, data);
+		}else
+			skb_queue_tail(&g_zvnet_skb_queue, skb);
+		zvnetdev->retran_times = 0;
+	} else {
+		net->stats.tx_errors++;
+		net->stats.tx_dropped++;
+		zv_err("write err, ret:%d!", ret);
+		if(data){
+			data->isToap = 0;
+			kfree_skb(data);
+		}
+		else
+			skb->isToap = 0;
+		kfree_skb(skb);
+	}
+#endif
+#endif
+	return NET_XMIT_SUCCESS;
+}
+
+/* .ndo_tx_timeout: called by the kernel watchdog when transmit times out;
+ * count the error and restart the queue. */
+static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue)
+{
+	zv_warn("sent timeout!");
+	net->stats.tx_errors++;
+	netif_wake_queue(net);
+}
+
+/* .ndo_get_stats: counters live in struct net_device itself; hand them back. */
+static struct net_device_stats *zvnet_get_stats(struct net_device *net)
+{
+	return &net->stats;
+}
+
+/* netdev callbacks for every zvnet interface. */
+const struct net_device_ops zvnet_netdev_ops = {
+	.ndo_open		= zvnet_open,
+	.ndo_stop		= zvnet_close,
+	.ndo_start_xmit		= zvnet_xmit,
+	.ndo_tx_timeout		= zvnet_tx_timeout,
+	.ndo_get_stats		= zvnet_get_stats,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+/*
+ * One-time net_device setup: random MAC, ops table, watchdog period, and
+ * NOARP (the link is a point-to-point inter-core channel, not real Ethernet).
+ */
+static void v2xnet_init_netdev(struct net_device *net)
+{
+	u8 node_id [ETH_ALEN];
+
+	random_ether_addr(node_id);
+	memcpy (net->dev_addr, node_id, sizeof node_id);
+
+	net->netdev_ops = &zvnet_netdev_ops;
+	net->watchdog_timeo = WATCHDOG_TIMEO;
+	net->flags |= IFF_NOARP;
+}
+
+/*
+ * Deliver a received skb to the network stack and account it.
+ * netif_rx() takes ownership of the skb, so its length is captured first.
+ */
+static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb)
+{
+	/* BUGFIX: read len before netif_rx() - the stack may free the skb */
+	unsigned int len = skb->len;
+	int status;
+
+	status = netif_rx (skb);
+	if (status == NET_RX_SUCCESS) {
+		dev->net->stats.rx_packets++;
+		dev->net->stats.rx_bytes += len;
+	} else {
+		dev->net->stats.rx_errors++;
+		zv_err("netif_rx status %d.", status);
+	}
+}
+
+/*
+ * RX tasklet: drain the per-device receive queue and push each non-empty
+ * skb up the stack; zero-length skbs are dropped and counted as errors.
+ */
+static void zvnet_bh (unsigned long param)
+{
+	struct zvnet *dev = (struct zvnet *)param;
+	struct sk_buff *skb;
+
+	while((skb = skb_dequeue(&dev->rxq)) != NULL) {
+		if (skb->len)
+			zvnet_skb_return(dev, skb);
+		else {
+			dev->net->stats.rx_errors++;
+			dev_kfree_skb (skb);
+			zv_err("drop!!!ddrnet_bh skb len == 0.");
+		}
+	}
+}
+
+/*
+ * Initialize the zvnet private area of a freshly allocated net_device:
+ * RX queue, RX tasklet, and the back-pointer to the owning zvnet_device.
+ * Returns the private struct, or NULL on failure.
+ */
+static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev)
+{
+	struct zvnet *dev = NULL;
+
+	dev = netdev_priv(net);
+	if(!dev) {
+		zv_err("dev is null.\n");
+		return NULL;
+	}
+
+	dev->net = net;
+	dev->bh.func = zvnet_bh;
+	dev->bh.data = (unsigned long) dev;
+
+	skb_queue_head_init (&dev->rxq);
+
+	dev->dev_priv = zvnetdev;
+
+	return dev;
+}
+
+/* Write a buffer to the rpmsg channel.  Returns >= 0 on success, < 0 on failure. */
+static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len)
+{
+	T_RpMsg_Msg msg;
+
+	if (!buf)
+		return -EINVAL;
+
+	/* Compound literal zero-fills every field not named, like memset did. */
+	msg = (T_RpMsg_Msg){
+		.coreID = chninfo->core_id,
+		.chID   = chninfo->channel_id,
+		.flag   = RPMSG_WRITE_INT, //| RPMSG_WRITE_IRQLOCK;
+		.buf    = buf,
+		.len    = len,
+	};
+
+	return zvnetWrite(&msg);
+}
+
+/*·µ»ØÖµ´óÓÚ0£¬±íʾ¶ÁȡͨµÀ³É¹¦£»Ð¡ÓÚµÈÓÚ0±íʾͨµÀÊý¾ÝΪ¿Õ»òʧ°Ü*/
+static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len)
+{
+ T_RpMsg_Msg msg;
+ int ret = 0;
+
+ if(NULL == buf) {
+ return -EINVAL;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.coreID = chninfo->core_id;
+ msg.chID = chninfo->channel_id;
+ msg.buf = buf;
+ msg.len = len;
+
+ ret = zvnetRead(&msg);
+ if (ret <= 0) {
+ zv_err("rpm read err=%d!",ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Discard pending data on a channel by reading up to half the channel size
+ * into a scratch buffer.  Returns the number of bytes discarded (0 when the
+ * read failed), or -ENOMEM when the scratch buffer could not be allocated.
+ */
+static int zvnet_channel_clear(struct zvnet_channel *chninfo)
+{
+	char *tbuf = NULL;
+	unsigned int tlen = chninfo->channel_size/2;
+	int ret = 0;
+
+	tbuf = (char *)kzalloc(tlen,GFP_ATOMIC);
+	/* BUGFIX: kzalloc() returns NULL on failure, never an ERR_PTR - the old
+	 * IS_ERR() check could never fire and a NULL dereference followed. */
+	if(!tbuf) {
+		zv_err("kzalloc fail! %d byte.", tlen);
+		return -ENOMEM;
+	}
+	ret = zvnet_channel_read(chninfo, tbuf, tlen);
+	if(ret < 0) {
+		zv_err("zvnet_channel_read fail!");
+		ret = 0;
+	}
+	kfree(tbuf);
+	zv_err("Drop channel data. %d byte.",ret);
+
+	return ret;
+}
+
+/* Read one zvp_header from the channel; thin wrapper over zvnet_channel_read(). */
+static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp)
+{
+	return zvnet_channel_read(chninfo, phzvp, sizeof(struct zvp_header));
+}
+
+/*
+ * Allocate an skb of tlen bytes and fill it from the channel.
+ * Returns the skb, or NULL on bad arguments, allocation failure, or a short
+ * read (the partially filled skb is freed).
+ */
+static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev)
+{
+	struct sk_buff *skb;
+
+	if(NULL == chninfo || 0 >= tlen || NULL == dev) {
+		return NULL;
+	}
+	skb = dev_alloc_skb(tlen);
+	if (unlikely(!skb)) {
+		zv_err("netdev_alloc_skb fail,len %d",tlen);
+		return NULL;
+	}
+	skb_put(skb,tlen);
+
+	/* any error return (<= 0) also fails the != tlen comparison */
+	if(zvnet_channel_read(chninfo, (void *)skb->data, tlen) != tlen) {
+		zv_err("zvnet_channel_read fail.\n");
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	zv_info("%s dev receive packet %d byte.",dev->net->name, tlen);
+
+	skb->dev = dev->net;
+
+	return skb;
+}
+
+/*
+ * Read one T_zvnet_rpmsg descriptor from the channel and wrap the CAP
+ * shared-DDR buffer it describes in an skb (zero-copy path).
+ * Returns the skb, or NULL on a failed read or skb-head allocation; panics
+ * on a short (partial descriptor) read.
+ */
+static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo)
+{
+	struct sk_buff *skb;
+#if 0
+	char skb_data[ZVNET_TMP_BUFF_LEN];
+	int ret_len = 0;
+
+	if(NULL == chninfo || NULL == dev) {
+		zv_err("zvnet_channel_read null.\n");
+		return NULL;
+	}
+
+	ret_len = zvnet_channel_read(chninfo, (void *)skb_data, sizeof(skb_data));
+
+	if(ret_len <= 0) {
+		zv_err("zvnet_channel_read fail.\n");
+		return NULL;
+	}
+
+	skb = dev_alloc_skb(ret_len + ZVNET_SKB_PAD);
+	if (unlikely(!skb)) {
+		zv_err("netdev_alloc_skb fail,len %d",ret_len);
+		return NULL;
+	}
+
+	skb_put(skb,ret_len);
+	skb_reserve(skb, ZVNET_SKB_PAD);
+	memcpy(skb->data, &skb_data[0], ret_len);
+#else
+	struct T_zvnet_rpmsg buff = {0};
+	int ret_len = 0;
+	ret_len = zvnet_channel_read(chninfo, (void *)&buff, sizeof(struct T_zvnet_rpmsg));
+
+	if(ret_len <= 0) {
+		zv_err("rpm read err=%d", ret_len);
+		msleep(1000);	/* back off; the channel is empty or broken */
+		return NULL;
+	}
+	if(ret_len != sizeof(struct T_zvnet_rpmsg)) {
+		panic("err, ret:%d!!!!!!", ret_len);
+	}
+	skb = skb_build_apbuf(&buff);	/* frees the CAP buffer itself on failure */
+	if (unlikely(!skb)) {
+		zv_err("netdev_alloc_skb fail,len %d",ret_len);
+		return NULL;
+	}
+#endif
+	//skb->dev = dev->net;
+	return skb;
+}
+
+/*
+ * RX kthread: blocks on rpmsg channel 20, converts each received
+ * T_zvnet_rpmsg descriptor into an skb, offers it to the fast-forward hook,
+ * and otherwise queues it on the owning device's RX queue for the tasklet.
+ * Never returns in normal operation.
+ */
+static int zvnet_receive_thread(void *argv)
+{
+	//struct zvnet_device *zvnetdev = (struct zvnet_device *)argv;
+	//struct zvnet_channel *chninfo = NULL;
+	struct zvnet *dev = NULL;
+	int index,ret_len,i,num;
+	unsigned long flags;
+	struct sk_buff *skb = NULL;
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 20,
+						.flag = 0};
+	struct T_zvnet_rpmsg buff[ZVNET_XMIT_BUFF_NUM];
+	//struct zvp_header hzvp;
+/*
+	if(IS_ERR(zvnetdev)) {
+		zv_err("The receive thread create fail!");
+		return -EINVAL;
+	}
+	chninfo = &zvnetdev->chn_info;
+	dev = zvnetdev->dev;
+*/
+	while(1) {
+/*
+		if(unlikely(!(zvnetdev->net->flags & IFF_UP))) {
+			msleep(1000);
+			continue;
+		}
+*/
+		//memset(&hzvp, 0, sizeof(hzvp));
+#ifdef USE_ZVNET_PACKET
+		//ret_len = zvnet_channel_read(&g_zvnet_chn_info, (void *)buff, sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM);
+		msg.buf = (void *)(buff);	// destination buffer
+		msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM;	// max length to read
+		ret_len = zvnetRead(&msg);	// read one batch of nodes from the ring queue
+		zv_info("zvnetRead ret=%d", ret_len);
+		if(ret_len <= 0) {
+			zv_err("rpm read err=%d", ret_len);
+			msleep(1000);	/* back off on empty/broken channel */
+			continue;
+		}
+		/* a partial descriptor means the ring is corrupt */
+		if((ret_len % sizeof(struct T_zvnet_rpmsg)) != 0) {
+			panic("err, ret:%d!!!!!!", ret_len);
+		}
+		num = ret_len / sizeof(struct T_zvnet_rpmsg);
+		for(i = 0; i < num; i++){
+			skb = skb_build_apbuf(&buff[i]);
+			if (unlikely(!skb)) {
+				zv_err("skb_build_apbuf fail,len=%d i=%d",ret_len,i);
+				continue;
+			}
+			if(unlikely(!(skb->dev->flags & IFF_UP))) {
+				if(printk_ratelimit())
+					zv_err("drop!!!%s is down.", skb->dev->name);
+				dev_kfree_skb (skb);
+				continue;
+			}
+			skb->protocol = eth_type_trans(skb, skb->dev);
+			/* fast-forward hook consumes the skb when it returns nonzero */
+			if (fast_from_driver && fast_from_driver(skb, skb->dev))
+			{
+				continue;
+			}
+			index = zvnet_get_index_by_netdev(skb->dev);
+			if(index < 0)
+				panic("");
+			dev = zvnet_dev[index].dev;
+			spin_lock_irqsave(&dev->rxq.lock, flags);
+			__skb_queue_tail(&dev->rxq, skb);
+			spin_unlock_irqrestore(&dev->rxq.lock, flags);
+			tasklet_schedule(&dev->bh);
+		}
+#else
+		if(0 != (skb = zvnet_direct_read_skb(&g_zvnet_chn_info))) {
+			//skb_debug_test(skb);
+			if(unlikely(!(skb->dev->flags & IFF_UP))) {
+				zv_err("drop!!!%s is down.", skb->dev->name);
+				dev_kfree_skb (skb);
+				continue;
+			}
+			skb->protocol = eth_type_trans(skb, skb->dev);
+#if 1
+			if (fast_from_driver && fast_from_driver(skb, skb->dev))
+			{
+				continue;
+			}
+#endif
+			index = zvnet_get_index_by_netdev(skb->dev);
+			if(index < 0)
+				panic("");
+			dev = zvnet_dev[index].dev;
+			spin_lock_irqsave(&dev->rxq.lock, flags);
+			__skb_queue_tail(&dev->rxq, skb);
+			spin_unlock_irqrestore(&dev->rxq.lock, flags);
+			tasklet_schedule(&dev->bh);
+		}
+		else {
+			zv_err("zvnet_read_header fail.");
+			msleep(1000);
+		}
+#endif
+	}
+
+	zv_err("The receive thread exit!");
+	return 0;
+}
+
+/* Thin wrapper over zvnetCreateChannel(); kept as a seam for future hooks. */
+static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size)
+{
+	return zvnetCreateChannel (dstCoreID, chID, size);
+}
+
+/*
+ * Create the inter-core channel.  An already-existing channel counts as
+ * success; any other non-success code is logged and returned unchanged.
+ */
+static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size)
+{
+	int retval = rpmsgCreateChannel_v2xnet(core_id, channel_id, channel_size);
+
+	if (retval != RPMSG_SUCCESS && retval != RPMSG_CHANNEL_ALREADY_EXIST)
+		zv_err("could not create channel.");
+
+	return retval;
+}
+/*
+static int zvnet_channel_create(struct zvnet_device *zvnetdev)
+{
+ struct task_struct *th = NULL;
+ int retval = 0;
+ struct zvnet_channel *chninfo = NULL;
+
+ if (IS_ERR(zvnetdev)) {
+ return -EINVAL;
+ }
+ chninfo = &(zvnetdev->chn_info);
+ retval = zvnet_createIcpChannel(chninfo->core_id, chninfo->channel_id, chninfo->channel_size);
+ if(retval < 0) {
+ zv_err("Create IcpChannel fail.");
+ return retval;
+ }
+
+ th = kthread_run(zvnet_receive_thread, (void *)zvnetdev, "zvnet-recv%d", chninfo->channel_id);
+ if (IS_ERR(th)) {
+ zv_err("Unable to start receive thread.");
+ return PTR_ERR(th);
+ }
+ chninfo->rcv_thread = th;
+
+ return 0;
+}
+*/
+/*
+ * Reclaim kthread: blocks on rpmsg channel 29 reading back skb pointers the
+ * CAP core has finished with, unlinks each from the in-flight queue
+ * (g_zvnet_skb_queue) and frees it.  Entries are pointer-sized (4 bytes on
+ * this 32-bit target).  Runs SCHED_FIFO so reclaim keeps pace with TX.
+ */
+static int zvnet_release_thread(void * nouse)
+{
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 29,
+						.flag = 0};
+	void *buff[ZVNET_FREE_BUFF_NUM];
+	int i,num,retval;
+	struct sk_buff *skb;
+	/* BUGFIX: initialize the RT priority once (was set twice: 1 then 37) */
+	struct sched_param param = { .sched_priority = 37 };
+	/* BUGFIX: "&param" had been mojibake-corrupted to a non-ASCII token */
+	sched_setscheduler(current, SCHED_FIFO, &param);
+
+	while(1) {
+		zv_info("g_zvnet_skb_queue.qlen=%d", g_zvnet_skb_queue.qlen);
+#ifdef USE_ZVNET_PACKET
+		msg.buf = (unsigned char *)(buff);	// destination buffer
+		msg.len = 4*ZVNET_FREE_BUFF_NUM;	// max length to read
+		retval = zvnetRead(&msg);	// read one batch of nodes from the ring queue
+		zv_info("free read ret=%d", retval);
+		if (retval <= 0) {
+			zv_err("rpm read err=%d", retval);
+			msleep(1000);
+			continue;
+		}
+		if((retval%4) != 0) {
+			panic("err, ret:%d!!!!!!", retval);
+		}
+		num = retval>>2;
+		for(i = 0; i < num; i++){
+			skb = (struct sk_buff *)buff[i];
+			zv_info("free 0x%x", skb);
+			/* next/prev NULL means the skb is not on any list - corruption */
+			if (skb == NULL || skb->next == NULL || skb->prev == NULL) {
+				panic("rpm read=%d i=%d NULL", retval, i);
+				continue;
+			}
+			skb_unlink(skb, &g_zvnet_skb_queue);
+			if(skb->isToap != 1)
+				panic("");
+			skb->isToap = 0;
+			kfree_skb(skb);
+		}
+#else
+		void *buff;
+		msg.coreID = CORE_PS0;
+		msg.chID = 29;
+		msg.buf = (unsigned char *)(&buff);	// destination buffer
+		msg.len = 4;	// length to read
+		//msg.flag |= RPMSG_READ_POLL;
+
+		retval = zvnetRead(&msg);	// read one node from the ring queue
+		if (retval <= 0) {
+			zv_err("no msg or threand exited");
+			msleep(1000);
+			continue;
+		}
+		if(retval != 4) {
+			panic("err, ret:%d!!!!!!", retval);
+		}
+		zv_info("free 0x%x", buff);
+		skb = (struct sk_buff *)buff;
+		skb_unlink(skb, &g_zvnet_skb_queue);
+		if(skb->isToap != 1)
+			panic("");
+		skb->isToap = 0;
+		kfree_skb(skb);
+#endif
+	}
+	zv_err("The realse thread exit!");
+	return 0;
+}
+#ifdef USE_ZVNET_PACKET
+/*
+ * TX batching kthread: wakes on the xmit semaphore or every g_wrap_timeout
+ * ms (timeout return deliberately ignored - it just drives a periodic flush)
+ * and drains the staged xmit queue to the CAP core.
+ */
+static int zvnet_xmit_warp_thread(void * nouse)
+{
+	while(1) {
+		down_timeout(&g_zvnet_xmit_sem, msecs_to_jiffies(g_wrap_timeout));
+		zvnet_xmit_packet();
+	}
+	zv_err("The xmit warp thread exit!");
+	return 0;
+}
+
+/*
+ * Buffer-return batching kthread: wakes on the free semaphore or every
+ * g_wrap_timeout ms and flushes pending CAP buffer pointers back to CAP.
+ */
+static int zvnet_free_warp_thread(void * nouse)
+{
+	while(1) {
+		down_timeout(&g_zvnet_free_sem, msecs_to_jiffies(g_wrap_timeout));
+		write_free_apbuf_packet();
+	}
+	zv_err("The free warp thread exit!");
+	return 0;
+}
+#endif
+
+/*
+ * Conntrack sync kthread: reads T_zvnet_rpmsg_ctstat records from rpmsg
+ * channel 21.  flag != 0 merges CAP-side fast-path packet/byte counters into
+ * the per-device stats and the conntrack entry; flag == 0 drops the ct
+ * reference CAP was holding.
+ * NOTE(review): ct->indev/outdev/packet_info are vendor extensions to
+ * struct nf_conn - semantics assumed from usage; confirm against the
+ * patched conntrack headers.
+ */
+static int zvnet_update_thread(void * nouse)
+{
+	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
+						.chID = 21,
+						.flag = 0};
+	int ret_len = 0;
+	struct nf_conn *ct;
+
+	while(1) {
+		struct T_zvnet_rpmsg_ctstat buff = {0};
+		msg.buf = (void *)(&buff);	// destination buffer
+		msg.len = sizeof(struct T_zvnet_rpmsg_ctstat);	// length to read
+		ret_len = zvnetRead(&msg);	// read one node from the ring queue
+
+		if(ret_len <= 0) {
+			zv_err("rpm read err=%d", ret_len);
+			msleep(1000);
+			continue;
+		}
+		if(ret_len != sizeof(struct T_zvnet_rpmsg_ctstat)) {
+			panic("err, ret:%d!!!!!!", ret_len);
+		}
+		ct = (struct nf_conn *)buff.cap_nfct;
+
+		WARN_ON(atomic_read(&ct->ct_general.use) == 0);
+		if(buff.flag){
+			/* in/out are 1-based device ids; 0 means the record is bogus */
+			BUG_ON(buff.in <= 0 || buff.out <= 0);
+			if(buff.pkt[0].pkt && buff.pkt[0].len){
+				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[0]->name, ct->outdev[0]->name, buff.in, buff.out);
+				zvnet_dev[buff.in-1].net->stats.rx_packets += buff.pkt[0].pkt;
+				zvnet_dev[buff.in-1].net->stats.rx_bytes += buff.pkt[0].len;
+				zvnet_dev[buff.out-1].net->stats.tx_packets += buff.pkt[0].pkt;
+				zvnet_dev[buff.out-1].net->stats.tx_bytes += buff.pkt[0].len;
+			}
+			if(buff.pkt[1].pkt && buff.pkt[1].len){
+				/* reply direction: in/out roles are swapped */
+				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[1]->name, ct->outdev[1]->name, buff.out, buff.in);
+				zvnet_dev[buff.out-1].net->stats.rx_packets += buff.pkt[1].pkt;
+				zvnet_dev[buff.out-1].net->stats.rx_bytes += buff.pkt[1].len;
+				zvnet_dev[buff.in-1].net->stats.tx_packets += buff.pkt[1].pkt;
+				zvnet_dev[buff.in-1].net->stats.tx_bytes += buff.pkt[1].len;
+			}
+			spin_lock_bh(&fast_fw_spinlock);
+			ct->packet_info[IP_CT_DIR_ORIGINAL].bytes += buff.pkt[IP_CT_DIR_ORIGINAL].len;
+			ct->packet_info[IP_CT_DIR_ORIGINAL].packets += buff.pkt[IP_CT_DIR_ORIGINAL].pkt;
+			ct->packet_info[IP_CT_DIR_REPLY].bytes += buff.pkt[IP_CT_DIR_REPLY].len;
+			ct->packet_info[IP_CT_DIR_REPLY].packets += buff.pkt[IP_CT_DIR_REPLY].pkt;
+			spin_unlock_bh(&fast_fw_spinlock);
+			zv_info("nf_update %x %d %d %d %d", buff.cap_nfct, buff.pkt[0].pkt, buff.pkt[0].len, buff.pkt[1].pkt, buff.pkt[1].len);
+		}else{
+			zv_info("nf_put %x", buff.cap_nfct);
+			nf_conntrack_put(buff.cap_nfct);
+		}
+	}
+	zv_err("The update thread exit!");
+	return 0;
+}
+
+/*******************************************************************************
+ * Global function implementations *
+ ******************************************************************************/
+/*
+ * Module init: allocate and register DDR_ZVNET_DEV_MAX ethernet devices,
+ * create the ICP channels (21: conntrack stats, 20: receive, 29: buffer
+ * release) with their worker kthreads, and map the shared AP DDR window.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. original: every error path now unwinds the already-registered
+ * netdevs (they were leaked before); a register_netdev() failure frees the
+ * failing netdev itself; ioremap failure returns -ENOMEM instead of -1;
+ * the mapped pointer is logged with %p instead of %x (which truncates on
+ * 64-bit); kthread args use NULL; dead commented-out code removed.
+ */
+static int __init zvnet_init(void)
+{
+	int i;
+	int err = -ENOMEM;
+	struct zvnet *dev = NULL;
+	struct net_device *net = NULL;
+	struct zvnet_device *zvnetdev = NULL;
+	struct task_struct *th = NULL;
+	int retval = 0;
+
+#ifdef USE_ZVNET_PACKET
+	skb_queue_head_init(&g_zvnet_skb_xmit_queue);
+	spin_lock_init(&g_zvnet_free_lock);
+	sema_init(&g_zvnet_free_sem, 0);
+	sema_init(&g_zvnet_xmit_sem, 0);
+#endif
+	skb_queue_head_init(&g_zvnet_skb_queue);
+	g_zvnet_chn_info.core_id = CORE_PS0;
+	g_zvnet_chn_info.channel_id = ICP_CHN_ZVNET1;
+	g_zvnet_chn_info.channel_size = ICP_CHANNEL_SIZE;
+
+	for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
+		zvnetdev = &zvnet_dev[i];
+		memset(zvnetdev, 0, sizeof(struct zvnet_device));
+		net = alloc_etherdev(sizeof(struct zvnet));
+		if (!net) {
+			zv_err("could not allocate device.\n");
+			err = -ENOMEM;
+			goto out_unregister;
+		}
+
+		/* reserve headroom for the inter-core packet header */
+		net->needed_headroom += ZVNET_SKB_PAD;
+		sprintf(net->name, "%s%d", ZVNET_IFNAME_PREFIX, i);
+		dev = v2xnet_dev_init(net, zvnetdev);
+		v2xnet_init_netdev(net);
+		if (0 == i) {
+			/* only the first interface participates in ARP */
+			net->flags = (net->flags & (~IFF_NOARP));
+		}
+		err = register_netdev(net);
+		if (err) {
+			zv_err("register_netdev error:%d :%d\n", err, i);
+			free_netdev(net); /* this one never reached zvnet_dev[] */
+			goto out_unregister;
+		}
+		zvnetdev->dev = dev;
+		zvnetdev->net = net;
+	}
+
+	/* channel 21: conntrack statistics updates from the CAP core */
+	retval = zvnet_createIcpChannel(CORE_PS0, 21, 64);
+	if (retval < 0) {
+		zv_err("Create IcpChannel channel_21 fail.");
+		err = retval;
+		goto out_unregister;
+	}
+	th = kthread_run(zvnet_update_thread, NULL, "zvnet-update%d", 21);
+	if (IS_ERR(th)) {
+		zv_err("Unable to start update thread.");
+		err = PTR_ERR(th);
+		goto out_unregister;
+	}
+
+	/* channel 20: packet receive path */
+	retval = zvnet_createIcpChannel(CORE_PS0, 20, ICP_CHANNEL_SIZE);
+	if (retval < 0) {
+		zv_err("Create IcpChannel channel_20 fail.");
+		err = retval;
+		goto out_unregister;
+	}
+	th = kthread_run(zvnet_receive_thread, NULL, "zvnet-recv%d", 20);
+	if (IS_ERR(th)) {
+		zv_err("Unable to start receive thread.");
+		err = PTR_ERR(th);
+		goto out_unregister;
+	}
+	g_zvnet_chn_info.rcv_thread = th;
+
+	/* channel 29: buffer release notifications */
+	retval = zvnet_createIcpChannel(CORE_PS0, 29, ICP_CHANNEL_SIZE);
+	if (retval < 0) {
+		zv_err("Create IcpChannel channel_29 fail.");
+		err = retval;
+		goto out_unregister;
+	}
+	th = kthread_run(zvnet_release_thread, NULL, "zvnet-free%d", 29);
+	if (IS_ERR(th)) {
+		zv_err("Unable to start release thread.");
+		err = PTR_ERR(th);
+		goto out_unregister;
+	}
+
+#ifdef USE_ZVNET_PACKET
+	th = kthread_run(zvnet_xmit_warp_thread, NULL, "zvnet-xmit-wrap");
+	if (IS_ERR(th)) {
+		zv_err("Unable to start xmit_warp thread.");
+		err = PTR_ERR(th);
+		goto out_unregister;
+	}
+	th = kthread_run(zvnet_free_warp_thread, NULL, "zvnet-free-wrap");
+	if (IS_ERR(th)) {
+		zv_err("Unable to start free_warp thread.");
+		err = PTR_ERR(th);
+		goto out_unregister;
+	}
+#endif
+
+	/* map the shared AP DDR window used for cross-core buffers */
+	vir_addr_ap = ioremap_cache(DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
+	if (vir_addr_ap == NULL) {
+		zv_err("AP mmap failed.\n");
+		err = -ENOMEM;
+		goto out_unregister;
+	}
+	zv_warn("vir_addr_ap vir=0x%p phy=0x%x len=0x%x", vir_addr_ap, DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
+
+	zv_dbg("success.\n");
+	return 0;
+
+out_unregister:
+	/* unwind in reverse: exactly slots [0, i) hold registered netdevs */
+	while (--i >= 0) {
+		unregister_netdev(zvnet_dev[i].net);
+		free_netdev(zvnet_dev[i].net);
+		zvnet_dev[i].net = NULL;
+	}
+	return err;
+}
+
+/*
+ * Module teardown: unregister and free every ethernet device that
+ * zvnet_init() created, clearing each bookkeeping slot as it goes.
+ */
+static void __exit zvnet_exit(void)
+{
+	int idx;
+
+	for (idx = 0; idx < DDR_ZVNET_DEV_MAX; idx++) {
+		struct net_device *ndev = zvnet_dev[idx].net;
+
+		unregister_netdev(ndev);
+		free_netdev(ndev);
+		zvnet_dev[idx].net = NULL;
+	}
+	zv_warn("success.\n");
+}
+
+/* late_initcall: run zvnet_init after all normal-level initcalls. */
+late_initcall(zvnet_init);
+module_exit(zvnet_exit);
+
+MODULE_AUTHOR("ZXIC");
+MODULE_DESCRIPTION("ZXIC CAP LAN NET DEVICE");
+MODULE_LICENSE("GPL");
+