blob: bea60e191734ca0cfc12b7da2e1aefe28d1692cf [file] [log] [blame]
xf.li2f424182024-08-20 00:47:34 -07001/*******************************************************************************
2 * Include header files *
3 ******************************************************************************/
4#include <linux/module.h>
5#include <linux/etherdevice.h>
6#include <net/sock.h>
7#include <uapi/linux/sched/types.h>
8#include "zvnet_dev.h"
9#include "ram_config.h"
10#include <net/netfilter/nf_conntrack.h>
11
12/*******************************************************************************
13 * Macro definitions *
14 ******************************************************************************/
15#define USE_ZVNET_PACKET
16
17#define WATCHDOG_TIMEO (5*HZ)
18#define XMIT_RETRANS_TIMES 3
19#define ZVNET_SKB_PAD 128
20#define ZVNET_TMP_BUFF_LEN 2048
21#define ZVNET_FREE_BUFF_NUM 256
22#define ZVNET_XMIT_BUFF_NUM 64
23#define ZVNET_XMIT_MAX_QUEUE_NUM 2048
24
25/*******************************************************************************
26 * Type definitions *
27 ******************************************************************************/
/*
 * Packet descriptor passed from the AP core to the CAP core; written
 * into shared DDR.  (Comments translated from the original Chinese.)
 */
struct T_zvnet_rpmsg
{
	void *buff;              /* skb head pointer; handed back across cores on free for fast release */
	void *head;              /* address boundary the peer core may use — must not be exceeded; physical address */
	unsigned short data_off; /* offset from head to the packet data (points at the MAC header) */
	unsigned short len;      /* valid packet length, normally the MAC frame length */
	unsigned short end_off;  /* end offset */
	unsigned char dev;       /* cid 1->8 */
	unsigned char flag;      /* 0: normal packet, 1: re-forwarded packet, 2: fast re-forwarded packet */
};
/* Per-direction packet/byte counters. */
struct T_zvnet_pkt_stats
{
	unsigned int pkt;
	unsigned int len;
};
/*
 * Conntrack statistics passed from the AP core to the CAP core;
 * written into shared DDR.
 */
struct T_zvnet_rpmsg_ctstat
{
	void *cap_nfct;          /* peer-side nf_conn pointer (cast back in zvnet_update_thread) */
	unsigned char in;        /* ingress zvnet_dev[] index + 1 */
	unsigned char out;       /* egress zvnet_dev[] index + 1 */
	unsigned short flag;
	struct T_zvnet_pkt_stats pkt[2]; /* assumed [0]=original dir, [1]=reply dir — TODO confirm */
};
53/*******************************************************************************
54 * Local variable definitions *
55 ******************************************************************************/
56struct zvnet_device zvnet_dev[DDR_ZVNET_DEV_MAX];
57int *vir_addr_ap = NULL;
58struct sk_buff_head g_zvnet_skb_queue;
59struct zvnet_channel g_zvnet_chn_info;
60
61#ifdef USE_ZVNET_PACKET
62void *g_zvnet_free_buff[ZVNET_FREE_BUFF_NUM];
63int g_zvnet_free_num;
64spinlock_t g_zvnet_free_lock;
65struct semaphore g_zvnet_free_sem;
66struct semaphore g_zvnet_xmit_sem;
67struct sk_buff_head g_zvnet_skb_xmit_queue;
68
69unsigned int g_wrap_packet_size = 1000;
70module_param(g_wrap_packet_size, int, 0644);
71unsigned int g_wrap_num = 10;
72module_param(g_wrap_num, int, 0644);
73unsigned int g_wrap_timeout = 10;
74module_param(g_wrap_timeout, int, 0644);
75#endif
76
77/*******************************************************************************
78 * Global variable definitions *
79 ******************************************************************************/
80extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
81extern void v7_dma_map_area(const void *, size_t, int);
82extern void *get_ct_for_ap(struct sk_buff *skb);
83extern void put_ct_for_ap(void *ct);
84extern spinlock_t fast_fw_spinlock;
85/*******************************************************************************
86 * Local function declarations *
87 ******************************************************************************/
88static int zvnet_open(struct net_device *net);
89static int zvnet_close(struct net_device *net);
90static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net);
91static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue);
92static struct net_device_stats *zvnet_get_stats(struct net_device *net);
93static void v2xnet_init_netdev(struct net_device *net);
94static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb);
95static void zvnet_bh (unsigned long param);
96static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev);
97
98static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len);
99static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len);
100static int zvnet_channel_clear(struct zvnet_channel *chninfo);
101static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp);
102static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo);
103static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev);
104static int zvnet_receive_thread(void *argv);
105static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size);
106static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size);
107static int zvnet_channel_create(struct zvnet_device *zvnetdev);
108
109/*******************************************************************************
110 * Local function implementations *
111 ******************************************************************************/
112
113unsigned long virt_to_phys_ap(unsigned long virt)
114{
115 if(virt >= (unsigned long)vir_addr_ap && virt <= ((unsigned long)vir_addr_ap+DDR_BASE_LEN_AP))
116 return DDR_BASE_ADDR_AP + (virt - (unsigned long)vir_addr_ap);
117 return NULL;
118}
119
120unsigned long phys_to_virt_ap(unsigned long phys)
121{
122 if(phys >= DDR_BASE_ADDR_AP && phys <= (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP))
123 return (unsigned long)vir_addr_ap + (phys - DDR_BASE_ADDR_AP);
124 return NULL;
125}
126
/*
 * Debug sanity check for an skb about to be freed or queued.  Panics
 * when:
 *  - skb->capHead disagrees with whether skb->head lies inside the AP
 *    shared-DDR window (capHead set but head not an AP address, or the
 *    reverse), or
 *  - the skb is still linked on g_zvnet_skb_queue (double free).
 *
 * NOTE(review): g_zvnet_skb_queue is walked without taking its lock —
 * presumably tolerated because this is a debug-only path; confirm the
 * calling context before relying on it.
 */
void check_skb_test(struct sk_buff *skb)
{
	if (skb && vir_addr_ap) {
		struct sk_buff *tmp_skb;
		/* capHead must be set iff head points into the AP window */
		if ((skb->capHead && (virt_to_phys_ap(skb->head) == NULL))
		    || ((skb->capHead == NULL) && virt_to_phys_ap(skb->head))) {
			dump_stack();
			msleep(1000);
			panic("capHead err");
		}
		/* detect an skb that is still queued (would be a double free) */
		skb_queue_walk(&g_zvnet_skb_queue, tmp_skb) {
			if (tmp_skb == skb) {
				dump_stack();
				msleep(1000);
				panic("dup free");
			}
		}
	}
}
146
147int zvnet_get_index_by_netdev(struct net_device *net)
148{
149 int i;
150
151 for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
152 if(zvnet_dev[i].net == net)
153 return i;
154 }
155 return -1;
156}
157#ifdef USE_ZVNET_PACKET
/*
 * Flush the batched list of peer-owned buffer handles
 * (g_zvnet_free_buff) to the CAP core over rpmsg channel 29 so the
 * peer can release them.  Called from the free-wrap thread; serialized
 * against write_free_apbuf() by g_zvnet_free_lock.
 */
void write_free_apbuf_packet(void)
{
	int ret,size;
	unsigned long flags;
	void *buf[ZVNET_FREE_BUFF_NUM];

	spin_lock_irqsave(&g_zvnet_free_lock, flags);
	if(g_zvnet_free_num == 0){
		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
		return;
	}
	/* entry count -> bytes; assumes 4-byte pointers (32-bit build) */
	size = (g_zvnet_free_num << 2);
	memcpy(buf, g_zvnet_free_buff, size);
	g_zvnet_free_num = 0;
	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
	{
		T_RpMsg_Msg msg = { .coreID = CORE_PS0,
		                    .chID = 29,
		                    .flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
		/* paranoia: the snapshot can never exceed the batch array */
		if((size >> 2) > ZVNET_FREE_BUFF_NUM)
			panic("free packet err");
		msg.buf = &buf;
		msg.len = size;
		zv_info("tofree size=%d", size);
		ret = zvnetWrite(&msg);
		if(ret < 0)
			panic("err, ret:%d!!!!!!", ret);
	}
}
187#endif
/*
 * Return one CAP-owned buffer handle to the CAP core: the pointer is
 * written into shared DDR and the peer is notified via ICP (rpmsg
 * channel 29).  (Translated from the original Chinese comment.)
 *
 * With USE_ZVNET_PACKET the handles are batched in g_zvnet_free_buff
 * under g_zvnet_free_lock: a full batch is flushed inline, and the
 * free-wrap thread is kicked once more than g_wrap_num entries are
 * pending.  Without it, each handle is sent individually.
 */
void write_free_apbuf(void *head)
{
#ifdef USE_ZVNET_PACKET
	unsigned long flags;

	zv_info("g_zvnet_free_num=%d skb=0x%x", g_zvnet_free_num, head);
	spin_lock_irqsave(&g_zvnet_free_lock, flags);
	g_zvnet_free_buff[g_zvnet_free_num] = head;
	g_zvnet_free_num++;
	if(g_zvnet_free_num == ZVNET_FREE_BUFF_NUM){
		/* batch full: snapshot under the lock, then flush immediately */
		int size;
		void *buf[ZVNET_FREE_BUFF_NUM];

		size = (g_zvnet_free_num << 2); /* assumes 4-byte pointers */
		memcpy(buf, g_zvnet_free_buff, size);
		g_zvnet_free_num = 0;
		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
		if((size >> 2) > ZVNET_FREE_BUFF_NUM)
			panic("free packet err");
		{
			int ret;
			T_RpMsg_Msg msg = { .coreID = CORE_PS0,
			                    .chID = 29,
			                    .flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
			msg.buf = &buf;
			msg.len = size;
			if(printk_ratelimit())
				zv_warn("tofree quick size=%d", size);
			ret = zvnetWrite(&msg);
			if(ret < 0)
				panic("err, ret:%d!!!!!!", ret);
		}
		return;
	}
	/* enough pending entries: wake the free-wrap thread */
	if(g_zvnet_free_num > g_wrap_num)
		up(&g_zvnet_free_sem);
	if(g_zvnet_free_num > ZVNET_FREE_BUFF_NUM)
		panic("free_buff err");
	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
#else
	int ret = 0;
	long buf = (long)head;

	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
	                    .chID = 29,
	                    .flag = RPMSG_WRITE_INT,
	                    .buf = NULL,
	                    .len = 4 };
	msg.buf = &buf;
	zv_info("tofree 0x%x", head);
	ret = zvnetWrite(&msg);
	if(ret < 0)
		panic("err, ret:%d!!!!!!", ret);
#endif
}
244
245struct sk_buff *skb_build_apbuf(struct T_zvnet_rpmsg *pbuf_temp)
246{
247 struct skb_shared_info *shinfo;
248 struct sk_buff *skb;
249 zv_info("build 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
250 if((unsigned long )pbuf_temp->head < DDR_BASE_ADDR_AP || (unsigned long )pbuf_temp->head > (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)){
251 zv_err("err 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
252 panic("addr is not APBUF mem!!!");
253 return NULL;
254 }
255 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
256 if (!skb)
257 {
258 write_free_apbuf(pbuf_temp->buff);
259 zv_err("alloc fail");
260 return NULL;
261 }
262 memset(skb, 0, offsetof(struct sk_buff, tail));
263
264 //ÅжÏÊÇ·ñÊÇapbuff
265
266 if(pbuf_temp->dev < 0 || pbuf_temp->dev >= DDR_ZVNET_DEV_MAX)
267 panic("dev index error!!!");
268 skb->head = phys_to_virt_ap((unsigned long )pbuf_temp->head);
269 skb->data = skb->head + pbuf_temp->data_off;
270 skb->capHead = pbuf_temp->buff;
271 //ÐèÒª¿¼ÂÇcacheÐÐ¶ÔÆë
272 skb->truesize = SKB_TRUESIZE(skb->data - skb->head + pbuf_temp->len);//°´µÀÀíβ²¿»¹ÓпÉÓõÄÄÚ´æ¿Õ¼ä£¬ÔÝʱδ¿¼ÂÇ;SKB_DATA_ALIGN
273
274 refcount_set(&skb->users, 1);
275 skb_reset_tail_pointer(skb);
276 skb->tail += pbuf_temp->len;
277 skb->len = pbuf_temp->len;
278 skb->end = skb->head + pbuf_temp->end_off;
279 skb->mac_header = (typeof(skb->mac_header))~0U;
280 skb->transport_header = (typeof(skb->transport_header))~0U;
281 skb->dev = zvnet_dev[pbuf_temp->dev].net;
282 if(skb->len == 0 || skb->len > 2000)
283 panic("len ERR!!!!!!!!!!\n");
284 v7_dma_map_area(skb->head, sizeof(struct skb_shared_info) + pbuf_temp->end_off, DMA_FROM_DEVICE);
285 if(IFF_NOARP & skb->dev->flags)
286 memcpy(skb->data, skb->dev->dev_addr, 6);
287 atomic_set(&skb_shinfo(skb)->dataref, 1);
288 /* make sure we initialize shinfo sequentially */
289 skb_reset_network_header(skb);
290 skb_set_kcov_handle(skb, kcov_common_handle());
291 return skb;
292}
293
/*
 * Legacy ndo_change_mtu implementation: accepts any MTU without range
 * validation, logging a deprecation warning.
 * NOTE(review): this redefines a symbol historically provided by the
 * kernel's ethernet core — confirm there is no clash with the running
 * kernel version.
 */
int eth_change_mtu(struct net_device *dev, int new_mtu)
{
	netdev_warn(dev, "%s is deprecated!\n", __func__);
	dev->mtu = new_mtu;
	return 0;
}
300
301static void skb_debug_test(struct sk_buff *skb)
302{
303 int i;
304 int vcount = skb->len/10;
305 int rcount = skb->len%10;
306 char tmp[64] = {0};
307 char strbuf[64] = {0};
308
309 zv_info("\n");
310 for(i=0; i<vcount; i++) {
311 zv_info("%d---0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x\n",i,skb->data[0+10*i],skb->data[1+10*i],skb->data[2+10*i],
312 skb->data[3+10*i],skb->data[4+10*i],skb->data[5+10*i],skb->data[6+10*i],skb->data[7+10*i],skb->data[8+10*i],skb->data[9+10*i]);
313 }
314 if(vcount > 0) {
315 memset(tmp,0,sizeof(tmp));
316 memset(strbuf,0,sizeof(strbuf));
317 sprintf(strbuf,"%d---",vcount);
318
319 for(i=0; i<rcount; i++)
320 {
321 sprintf(tmp,"0x%x,",skb->data[10*vcount + i]);
322 strcat(strbuf,tmp);
323 }
324 zv_info("%s ",strbuf);
325 }
326 zv_info("\n");
327}
328
329static int zvnet_open(struct net_device *net)
330{
331 struct zvnet *dev = netdev_priv(net);
332
333 if(net->flags & IFF_UP) {
334 zv_dbg("%s has been opened!", dev->net->name);
335 return -EBUSY;
336 }
337 netif_start_queue (net);
338
339 return 0;
340}
341
342static int zvnet_close(struct net_device *net)
343{
344 struct zvnet *dev = netdev_priv(net);
345
346 zv_info("%s", dev->net->name);
347 netif_stop_queue(net);
348 tasklet_kill (&dev->bh);
349
350 return 0;
351}
352#ifdef USE_ZVNET_PACKET
/*
 * Drain g_zvnet_skb_xmit_queue and send every queued skb to the CAP
 * core over rpmsg channel 20 in chunks of ZVNET_XMIT_BUFF_NUM
 * descriptors.
 *
 * Peer-owned buffers (skb->capHead set) are described by their peer
 * handle (plus a conntrack reference for fastnat skbs) and the local
 * skb is unlinked and freed immediately.  Locally-owned skbs are
 * spliced onto g_zvnet_skb_queue and stay alive until the peer returns
 * them through zvnet_release_thread().
 *
 * On a failed write the affected chunk is rolled back: local skbs are
 * unlinked and freed; peer buffers and ct references are returned.
 */
static void zvnet_xmit_packet(void)
{
	int i,j,k,ret,num;
	unsigned long flags;
	unsigned long flags1;
	struct sk_buff *skb, *tmp;
	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
	                    .chID = 20,
	                    .flag = RPMSG_WRITE_INT};
	/* static: the descriptor array is too large for the stack */
	static struct T_zvnet_rpmsg buff[ZVNET_XMIT_MAX_QUEUE_NUM+1];

	spin_lock_irqsave(&g_zvnet_skb_xmit_queue.lock, flags);
	if (skb_queue_empty(&g_zvnet_skb_xmit_queue)) {
		spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
		return;
	}
	i = 0;
	skb_queue_walk_safe(&g_zvnet_skb_xmit_queue, skb, tmp) {
		//buff[i].buff = skb;
		buff[i].data_off = skb->data - skb->head;
		//buff[i].head = virt_to_phys(skb->head);
		buff[i].len = skb->len;
		buff[i].end_off = skb->end - skb->head;
		buff[i].dev = zvnet_get_index_by_netdev(skb->dev);
		if(skb->capHead){
			/* buffer belongs to the peer: send its handle back */
			buff[i].buff = skb->capHead;
#ifdef CONFIG_FASTNAT_MODULE
			if(skb->isFastnat){
				buff[i].head = get_ct_for_ap(skb);
				buff[i].flag = 2;
			}else
#endif
			{
				buff[i].head = NULL;
				buff[i].flag = 1;
			}
			__skb_unlink(skb, &g_zvnet_skb_xmit_queue);
			kfree_skb(skb);
		}else{
			/* local buffer: the peer frees it later via channel 29 */
			buff[i].buff = skb;
			buff[i].head = virt_to_phys(skb->head);
			buff[i].flag = 0;
		}
		i++;
		zv_info("xmit skb=0x%x i=%d", skb, i);
		if(i > ZVNET_XMIT_MAX_QUEUE_NUM){
			panic("qlen:%d!", i);
			break;
		}
	}
	/* keep locally-owned skbs alive until the peer releases them */
	spin_lock_irqsave(&g_zvnet_skb_queue.lock, flags1);
	skb_queue_splice_tail_init(&g_zvnet_skb_xmit_queue, &g_zvnet_skb_queue);
	spin_unlock_irqrestore(&g_zvnet_skb_queue.lock, flags1);
	spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
	zv_info("g_zvnet_skb_queue.qlen=%d i=%d", g_zvnet_skb_queue.qlen, i);
	for(j = 0; j < i; j = j + ZVNET_XMIT_BUFF_NUM){
		if(i <= (j + ZVNET_XMIT_BUFF_NUM)){
			msg.buf = (void *)&buff[j];
			msg.len = sizeof(struct T_zvnet_rpmsg)*(i-j);/* final (possibly partial) chunk */
			ret = zvnetWrite(&msg);
		}else{
			msg.buf = (void *)&buff[j];
			msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM;
			ret = zvnetWrite(&msg);
		}
		zv_info("xmit write ret=%d size=%d i=%d j=%d", ret, msg.len, i, j);
		if(ret < 0) {
			/* roll back this chunk: drop local skbs, return peer resources */
			if(printk_ratelimit())
				zv_warn("zvnet_channel_write ret=%d fail.",ret);
			num = msg.len / sizeof(struct T_zvnet_rpmsg);
			for(k = j; k < j+num; k++){
				if(buff[k].flag == 0){
					skb = (struct sk_buff *)buff[k].buff;
					skb_unlink(skb, &g_zvnet_skb_queue);
					skb->isToap = 0;
					kfree_skb(skb);
				}else{
					if(buff[k].head)
						put_ct_for_ap(buff[k].head);
					write_free_apbuf(buff[k].buff);
				}
			}
		}
	}
}
438#endif
439static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net)
440{
441#ifdef USE_ZVNET_PACKET
442 struct sk_buff *data = NULL;
443
444 //zv_info("g_zvnet_skb_xmit_queue.qlen=%d", g_zvnet_skb_xmit_queue.qlen);
445 if(g_zvnet_skb_xmit_queue.qlen >= ZVNET_XMIT_MAX_QUEUE_NUM){
446 net->stats.tx_errors++;
447 net->stats.tx_dropped++;
448 zv_err("write err, qlen:%d!", g_zvnet_skb_xmit_queue.qlen);
449 kfree_skb(skb);
450 return NET_XMIT_SUCCESS;
451 }
452
453 if(unlikely(skb_headroom(skb) < NET_SKB_PAD || skb->next//|| skb->capHead
454 || skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags)
455 || (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
456 int ret_len = skb->len;
457
458 data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
459 if (unlikely(!data)) {
460 zv_err("dev_alloc_skb fail,len %d",ret_len);
461 net->stats.tx_errors++;
462 net->stats.tx_dropped++;
463 kfree_skb(skb);
464 return NET_XMIT_SUCCESS;
465 }
466 skb_put(data,ret_len);
467 skb_reserve(data, NET_IP_ALIGN);
468 memcpy(data->data, skb->data, ret_len);
469 zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
470 kfree_skb(skb);
471 }else{
472 data = skb;
473 }
474 data->dev = net;
475 data->isToap = 1;
476 v7_dma_map_area(data->head, data->end - data->head + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
477 skb_queue_tail(&g_zvnet_skb_xmit_queue, data);
478 if(data->len < g_wrap_packet_size || g_zvnet_skb_xmit_queue.qlen > g_wrap_num)
479 up(&g_zvnet_xmit_sem);
480 net->stats.tx_packets++;
481 net->stats.tx_bytes += skb->len;
482#else
483 struct zvnet *dev = netdev_priv(net);
484 struct zvnet_device *zvnetdev = (struct zvnet_device *)dev->dev_priv;
485 int ret = 0;
486 struct zvp_header hzvp;
487
488 if (!skb) {
489 zv_err("err: skb == 0!");
490 }
491#if 0
492 if (skb->len > ZVNET_TMP_BUFF_LEN) {
493 zv_err("err: skb->len(%d)>%d!", skb->len, ZVNET_TMP_BUFF_LEN);
494 }
495
496send_header:
497 ret = zvnet_channel_write(&(zvnetdev->chn_info), skb->data, skb->len);
498
499 if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
500 zvnetdev->retran_times ++;
501 zv_warn("The retran_times is %d.",zvnetdev->retran_times);
502 goto send_header;
503 }
504
505 if (ret >= 0) {
506 net->stats.tx_packets++;
507 net->stats.tx_bytes += skb->len;
508 } else {
509 net->stats.tx_errors++;
510 net->stats.tx_dropped++;
511 zv_err("write err, ret:%d!", ret);
512 }
513
514exit:
515 kfree_skb(skb);
516#else
517 struct T_zvnet_rpmsg buff = {0};
518 struct sk_buff *data = NULL;
519 if(unlikely(skb_headroom(skb) < NET_SKB_PAD || skb->capHead || skb->next
520 || skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags)
521 || (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
522 int ret_len = skb->len;
523
524 data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
525 if (unlikely(!data)) {
526 zv_err("dev_alloc_skb fail,len %d",ret_len);
527 net->stats.tx_errors++;
528 net->stats.tx_dropped++;
529 kfree_skb(skb);
530 return NET_XMIT_SUCCESS;
531 }
532 skb_put(data,ret_len);
533 skb_reserve(data, NET_IP_ALIGN);
534 memcpy(data->data, skb->data, ret_len);
535 data->isToap = 1;
536 buff.buff = data;
537 buff.data_off = data->data - data->head;
538 buff.head = virt_to_phys(data->head);
539 buff.len = ret_len;
540 buff.end_off = data->end - data->head;
541 buff.dev = zvnet_get_index_by_netdev(net);
542 zv_info("alloc 0x%x 0x%x %d %d", buff.buff, buff.head, buff.data_off, buff.len);
543 zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
544 v7_dma_map_area(data->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
545 }else{
546 skb->isToap = 1;
547 buff.buff = skb;
548 buff.data_off = skb->data - skb->head;
549 buff.head = virt_to_phys(skb->head);
550 buff.len = skb->len;
551 buff.end_off = skb->end - skb->head;
552 buff.dev = zvnet_get_index_by_netdev(net);
553 zv_info("transfer 0x%x %d 0x%x %d", buff.buff, buff.head, buff.data_off, buff.len);
554 v7_dma_map_area(skb->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
555 }
556send_header:
557 ret = zvnet_channel_write(&g_zvnet_chn_info, &buff, sizeof(struct T_zvnet_rpmsg));
558
559 if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
560 zvnetdev->retran_times ++;
561 zv_warn("The retran_times is %d.",zvnetdev->retran_times);
562 goto send_header;
563 }
564
565 if (ret >= 0) {
566 net->stats.tx_packets++;
567 net->stats.tx_bytes += skb->len;
568 if(data){
569 kfree_skb(skb);
570 skb_queue_tail(&g_zvnet_skb_queue, data);
571 }else
572 skb_queue_tail(&g_zvnet_skb_queue, skb);
573 zvnetdev->retran_times = 0;
574 } else {
575 net->stats.tx_errors++;
576 net->stats.tx_dropped++;
577 zv_err("write err, ret:%d!", ret);
578 if(data){
579 data->isToap = 0;
580 kfree_skb(data);
581 }
582 else
583 skb->isToap = 0;
584 kfree_skb(skb);
585 }
586#endif
587#endif
588 return NET_XMIT_SUCCESS;
589}
590
591/* Called by the kernel when transmit times out */
592static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue)
593{
594 zv_warn("sent timeout!");
595 net->stats.tx_errors++;
596 netif_wake_queue(net);
597}
598
599static struct net_device_stats *zvnet_get_stats(struct net_device *net)
600{
601 return &net->stats;
602}
603
/* net_device callbacks for all zvnet interfaces. */
const struct net_device_ops zvnet_netdev_ops = {
	.ndo_open = zvnet_open,
	.ndo_stop = zvnet_close,
	.ndo_start_xmit = zvnet_xmit,
	.ndo_tx_timeout = zvnet_tx_timeout,
	.ndo_get_stats = zvnet_get_stats,
	.ndo_change_mtu = eth_change_mtu,   /* local legacy implementation above */
	.ndo_validate_addr = eth_validate_addr,
};
613
614static void v2xnet_init_netdev(struct net_device *net)
615{
616 u8 node_id [ETH_ALEN];
617
618 random_ether_addr(node_id);
619 memcpy (net->dev_addr, node_id, sizeof node_id);
620
621 net->netdev_ops = &zvnet_netdev_ops;
622 net->watchdog_timeo = WATCHDOG_TIMEO;
623 net->flags |= IFF_NOARP;
624}
625
626static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb)
627{
628 int status;
629
630 //zv_info("enter...");
631
632 //skb->protocol = eth_type_trans(skb, dev->net);
633
634 status = netif_rx (skb);
635 if (status == NET_RX_SUCCESS) {
636 dev->net->stats.rx_packets++;
637 dev->net->stats.rx_bytes += skb->len;
638 } else {
639 dev->net->stats.rx_errors++;
640 zv_err("netif_rx status %d.", status);
641 }
642}
643
644static void zvnet_bh (unsigned long param)
645{
646 struct zvnet *dev = (struct zvnet *)param;
647 struct sk_buff *skb;
648
649 while((skb = skb_dequeue(&dev->rxq)) != NULL) {
650 if (skb->len)
651 zvnet_skb_return(dev, skb);
652 else {
653 dev->net->stats.rx_errors++;
654 dev_kfree_skb (skb);
655 zv_err("drop!!!ddrnet_bh skb len == 0.");
656 }
657 }
658}
659
660static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev)
661{
662 struct zvnet *dev = NULL;
663
664 dev = netdev_priv(net);
665 if(!dev) {
666 zv_err("dev is null.\n");
667 return NULL;
668 }
669
670 dev->net = net;
671 dev->bh.func = zvnet_bh;
672 dev->bh.data = (unsigned long) dev;
673
674 skb_queue_head_init (&dev->rxq);
675
676 dev->dev_priv = zvnetdev;
677
678 return dev;
679}
680
681/*·µ»ØÖµ´óÓÚµÈÓÚ0£¬±íʾдͨµÀ³É¹¦£»Ð¡ÓÚ0±íʾдͨµÀʧ°Ü*/
682static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len)
683{
684 T_RpMsg_Msg msg;
685
686 if(NULL == buf) {
687 return -EINVAL;
688 }
689 memset(&msg, 0, sizeof(msg));
690 msg.coreID = chninfo->core_id;
691 msg.chID = chninfo->channel_id;
692 msg.flag |= RPMSG_WRITE_INT; //| RPMSG_WRITE_IRQLOCK;
693 msg.buf = buf;
694 msg.len = len;
695
696 return zvnetWrite(&msg);
697}
698
699/*·µ»ØÖµ´óÓÚ0£¬±íʾ¶ÁȡͨµÀ³É¹¦£»Ð¡ÓÚµÈÓÚ0±íʾͨµÀÊý¾ÝΪ¿Õ»òʧ°Ü*/
700static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len)
701{
702 T_RpMsg_Msg msg;
703 int ret = 0;
704
705 if(NULL == buf) {
706 return -EINVAL;
707 }
708
709 memset(&msg, 0, sizeof(msg));
710 msg.coreID = chninfo->core_id;
711 msg.chID = chninfo->channel_id;
712 msg.buf = buf;
713 msg.len = len;
714
715 ret = zvnetRead(&msg);
716 if (ret <= 0) {
717 zv_err("rpm read err=%d!",ret);
718 return ret;
719 }
720
721 return ret;
722}
723
724static int zvnet_channel_clear(struct zvnet_channel *chninfo)
725{
726 char *tbuf = NULL;
727 unsigned int tlen = chninfo->channel_size/2;
728 int ret = 0;
729
730 tbuf = (char *)kzalloc(tlen,GFP_ATOMIC);
731 if(IS_ERR(tbuf)) {
732 zv_err("kzalloc fail! %d byte.", tlen);
733 return -ENOMEM;
734 }
735 ret = zvnet_channel_read(chninfo, tbuf, tlen);
736 if(ret < 0) {
737 zv_err("zvnet_channel_read fail!");
738 ret = 0;
739 }
740 kfree(tbuf);
741 zv_err("Drop channel data. %d byte.",ret);
742
743 return ret;
744}
745
/* Read one zvp_header from the channel; return convention follows
 * zvnet_channel_read() (> 0 bytes read, <= 0 empty or error). */
static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp)
{
	return zvnet_channel_read(chninfo, phzvp, sizeof(struct zvp_header));
}
750
751static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev)
752{
753 struct sk_buff *skb;
754
755 if(NULL == chninfo || 0 >= tlen || NULL == dev) {
756 return NULL;
757 }
758 skb = dev_alloc_skb(tlen);
759 if (unlikely(!skb)) {
760 zv_err("netdev_alloc_skb fail,len %d",tlen);
761 return NULL;
762 }
763 skb_put(skb,tlen);
764
765 if(zvnet_channel_read(chninfo, (void *)skb->data, tlen) != tlen) {
766 zv_err("zvnet_channel_read fail.\n");
767 kfree_skb(skb);
768 return NULL;
769 }
770
771 zv_info("%s dev receive packet %d byte.",dev->net->name, tlen);
772
773 skb->dev = dev->net;
774
775 return skb;
776}
777
/*
 * Read one T_zvnet_rpmsg descriptor from the main channel and build an
 * skb around the shared-DDR buffer it describes.
 * Returns NULL on read failure (after a 1 s back-off) or when the skb
 * cannot be built; panics on a read of unexpected size.
 * The #if 0 block is an older copy-based implementation kept for
 * reference.
 */
static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo)
{
	struct sk_buff *skb;
#if 0
	char skb_data[ZVNET_TMP_BUFF_LEN];
	int ret_len = 0;

	if(NULL == chninfo || NULL == dev) {
		zv_err("zvnet_channel_read null.\n");
		return NULL;
	}

	ret_len = zvnet_channel_read(chninfo, (void *)skb_data, sizeof(skb_data));

	if(ret_len <= 0) {
		zv_err("zvnet_channel_read fail.\n");
		return NULL;
	}

	skb = dev_alloc_skb(ret_len + ZVNET_SKB_PAD);
	if (unlikely(!skb)) {
		zv_err("netdev_alloc_skb fail,len %d",ret_len);
		return NULL;
	}

	skb_put(skb,ret_len);
	skb_reserve(skb, ZVNET_SKB_PAD);
	memcpy(skb->data, &skb_data[0], ret_len);
#else
	struct T_zvnet_rpmsg buff = {0};
	int ret_len = 0;
	ret_len = zvnet_channel_read(chninfo, (void *)&buff, sizeof(struct T_zvnet_rpmsg));

	if(ret_len <= 0) {
		zv_err("rpm read err=%d", ret_len);
		msleep(1000);
		return NULL;
	}
	/* a partial descriptor means the ring protocol is broken */
	if(ret_len != sizeof(struct T_zvnet_rpmsg)) {
		panic("err, ret:%d!!!!!!", ret_len);
	}
	skb = skb_build_apbuf(&buff);
	if (unlikely(!skb)) {
		zv_err("netdev_alloc_skb fail,len %d",ret_len);
		return NULL;
	}
#endif
	//skb->dev = dev->net;
	return skb;
}
828
/*
 * Main RX kthread: read batches of T_zvnet_rpmsg descriptors from
 * rpmsg channel 20, build skbs over the shared-DDR buffers, and hand
 * them to the stack — first through the fast-forward hook, otherwise
 * via the per-device RX queue and bottom-half tasklet.
 * Never returns in normal operation.
 * (Inline comments translated from the original Chinese.)
 */
static int zvnet_receive_thread(void *argv)
{
	//struct zvnet_device *zvnetdev = (struct zvnet_device *)argv;
	//struct zvnet_channel *chninfo = NULL;
	struct zvnet *dev = NULL;
	int index,ret_len,i,num;
	unsigned long flags;
	struct sk_buff *skb = NULL;
	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
	                    .chID = 20,
	                    .flag = 0};
	struct T_zvnet_rpmsg buff[ZVNET_XMIT_BUFF_NUM];
	//struct zvp_header hzvp;
/*
	if(IS_ERR(zvnetdev)) {
		zv_err("The receive thread create fail!");
		return -EINVAL;
	}
	chninfo = &zvnetdev->chn_info;
	dev = zvnetdev->dev;
*/
	while(1) {
/*
		if(unlikely(!(zvnetdev->net->flags & IFF_UP))) {
			msleep(1000);
			continue;
		}
*/
		//memset(&hzvp, 0, sizeof(hzvp));
#ifdef USE_ZVNET_PACKET
		//ret_len = zvnet_channel_read(&g_zvnet_chn_info, (void *)buff, sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM);
		msg.buf = (void *)(buff); /* destination buffer */
		msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM; /* max length to read */
		ret_len = zvnetRead(&msg); /* read one node from the ring queue */
		zv_info("zvnetRead ret=%d", ret_len);
		if(ret_len <= 0) {
			zv_err("rpm read err=%d", ret_len);
			msleep(1000);
			continue;
		}
		/* anything but whole descriptors means ring corruption */
		if((ret_len % sizeof(struct T_zvnet_rpmsg)) != 0) {
			panic("err, ret:%d!!!!!!", ret_len);
		}
		num = ret_len / sizeof(struct T_zvnet_rpmsg);
		for(i = 0; i < num; i++){
			skb = skb_build_apbuf(&buff[i]);
			if (unlikely(!skb)) {
				zv_err("skb_build_apbuf fail,len=%d i=%d",ret_len,i);
				continue;
			}
			if(unlikely(!(skb->dev->flags & IFF_UP))) {
				if(printk_ratelimit())
					zv_err("drop!!!%s is down.", skb->dev->name);
				dev_kfree_skb (skb);
				continue;
			}
			skb->protocol = eth_type_trans(skb, skb->dev);
			/* fast-path hook consumes the skb when it returns nonzero */
			if (fast_from_driver && fast_from_driver(skb, skb->dev))
			{
				continue;
			}
			index = zvnet_get_index_by_netdev(skb->dev);
			if(index < 0)
				panic("");
			dev = zvnet_dev[index].dev;
			spin_lock_irqsave(&dev->rxq.lock, flags);
			__skb_queue_tail(&dev->rxq, skb);
			spin_unlock_irqrestore(&dev->rxq.lock, flags);
			tasklet_schedule(&dev->bh);
		}
#else
		if(0 != (skb = zvnet_direct_read_skb(&g_zvnet_chn_info))) {
			//skb_debug_test(skb);
			if(unlikely(!(skb->dev->flags & IFF_UP))) {
				zv_err("drop!!!%s is down.", skb->dev->name);
				dev_kfree_skb (skb);
				continue;
			}
			skb->protocol = eth_type_trans(skb, skb->dev);
#if 1
			if (fast_from_driver && fast_from_driver(skb, skb->dev))
			{
				continue;
			}
#endif
			index = zvnet_get_index_by_netdev(skb->dev);
			if(index < 0)
				panic("");
			dev = zvnet_dev[index].dev;
			spin_lock_irqsave(&dev->rxq.lock, flags);
			__skb_queue_tail(&dev->rxq, skb);
			spin_unlock_irqrestore(&dev->rxq.lock, flags);
			tasklet_schedule(&dev->bh);
		}
		else {
			zv_err("zvnet_read_header fail.");
			msleep(1000);
		}
#endif
	}

	zv_err("The receive thread exit!");
	return 0;
}
933
/* Thin wrapper over zvnetCreateChannel(); kept for naming symmetry
 * with the rpmsg API. */
static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size)
{
	return zvnetCreateChannel (dstCoreID, chID, size);
}
938
939static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size)
940{
941 int retval;
942
943 retval = rpmsgCreateChannel_v2xnet (core_id, channel_id, channel_size);
944 if(retval != RPMSG_SUCCESS && retval != RPMSG_CHANNEL_ALREADY_EXIST)
945 goto out;
946
947 return retval;
948
949out:
950 zv_err("could not create channel.");
951 return retval;
952}
953/*
954static int zvnet_channel_create(struct zvnet_device *zvnetdev)
955{
956 struct task_struct *th = NULL;
957 int retval = 0;
958 struct zvnet_channel *chninfo = NULL;
959
960 if (IS_ERR(zvnetdev)) {
961 return -EINVAL;
962 }
963 chninfo = &(zvnetdev->chn_info);
964 retval = zvnet_createIcpChannel(chninfo->core_id, chninfo->channel_id, chninfo->channel_size);
965 if(retval < 0) {
966 zv_err("Create IcpChannel fail.");
967 return retval;
968 }
969
970 th = kthread_run(zvnet_receive_thread, (void *)zvnetdev, "zvnet-recv%d", chninfo->channel_id);
971 if (IS_ERR(th)) {
972 zv_err("Unable to start receive thread.");
973 return PTR_ERR(th);
974 }
975 chninfo->rcv_thread = th;
976
977 return 0;
978}
979*/
/*
 * Release kthread (SCHED_FIFO prio 37): receive, on rpmsg channel 29,
 * arrays of skb pointers the CAP core has finished with, unlink each
 * from g_zvnet_skb_queue and free it locally.  Panics on a corrupt
 * message (odd size, NULL or unqueued skb, isToap not set).
 * (Inline comments translated from the original Chinese.)
 */
static int zvnet_release_thread(void * nouse)
{
	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
	                    .chID = 29,
	                    .flag = 0};
	void *buff[ZVNET_FREE_BUFF_NUM];
	int i,num,retval;
	struct sk_buff *skb;
	struct sched_param param = { .sched_priority = 1 };
	param.sched_priority = 37;
	sched_setscheduler(current, SCHED_FIFO, &param);

	while(1) {
		zv_info("g_zvnet_skb_queue.qlen=%d", g_zvnet_skb_queue.qlen);
#ifdef USE_ZVNET_PACKET
		msg.buf = (unsigned char *)(buff); /* destination buffer */
		msg.len = 4*ZVNET_FREE_BUFF_NUM; /* max length to read (4-byte pointers) */
		retval = zvnetRead(&msg); /* read one node from the ring queue */
		zv_info("free read ret=%d", retval);
		if (retval <= 0) {
			zv_err("rpm read err=%d", retval);
			msleep(1000);
			continue;
		}
		/* anything but whole pointers means ring corruption */
		if((retval%4) != 0) {
			panic("err, ret:%d!!!!!!", retval);
		}
		num = retval>>2;
		for(i = 0; i < num; i++){
			skb = (struct sk_buff *)buff[i];
			zv_info("free 0x%x", skb);
			/* a queued skb always has next/prev set; NULL means corruption */
			if (skb == NULL || skb->next == NULL || skb->prev == NULL) {
				panic("rpm read=%d i=%d NULL", retval, i);
				continue;
			}
			skb_unlink(skb, &g_zvnet_skb_queue);
			if(skb->isToap != 1)
				panic("");
			skb->isToap = 0;
			kfree_skb(skb);
		}
#else
		void *buff;
		msg.coreID = CORE_PS0;
		msg.chID = 29;
		msg.buf = (unsigned char *)(&buff); /* destination buffer */
		msg.len = 4; /* length to read */
		//msg.flag |= RPMSG_READ_POLL;

		retval = zvnetRead(&msg); /* read one node from the ring queue */
		if (retval <= 0) {
			zv_err("no msg or threand exited");
			msleep(1000);
			continue;
		}
		if(retval != 4) {
			panic("err, ret:%d!!!!!!", retval);
		}
		zv_info("free 0x%x", buff);
		skb = (struct sk_buff *)buff;
		skb_unlink(skb, &g_zvnet_skb_queue);
		if(skb->isToap != 1)
			panic("");
		skb->isToap = 0;
		kfree_skb(skb);
#endif
	}
	zv_err("The realse thread exit!");
	return 0;
}
1050#ifdef USE_ZVNET_PACKET
1051static int zvnet_xmit_warp_thread(void * nouse)
1052{
1053 while(1) {
1054 down_timeout(&g_zvnet_xmit_sem, msecs_to_jiffies(g_wrap_timeout));
1055 zvnet_xmit_packet();
1056 }
1057 zv_err("The xmit warp thread exit!");
1058 return 0;
1059}
1060
1061static int zvnet_free_warp_thread(void * nouse)
1062{
1063 while(1) {
1064 down_timeout(&g_zvnet_free_sem, msecs_to_jiffies(g_wrap_timeout));
1065 write_free_apbuf_packet();
1066 }
1067 zv_err("The free warp thread exit!");
1068 return 0;
1069}
1070#endif
1071
/*
 * Kernel thread: consumes conntrack statistics records sent by the
 * remote core over ICP channel 21 and folds them into the local
 * net_device counters and the nf_conn byte/packet accounting.
 *
 * Each record (struct T_zvnet_rpmsg_ctstat) carries:
 *   - cap_nfct: pointer to the local nf_conn the record refers to
 *   - flag:     non-zero -> stats delta; zero -> remote side is done
 *               with this conntrack, drop our reference
 *   - in/out:   1-based zvnet device indices for the two directions
 *   - pkt[2]:   packet/byte deltas indexed by conntrack direction
 *
 * Never returns during normal operation (loops forever).
 */
static int zvnet_update_thread(void * nouse)
{
	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
	                    .chID = 21,
	                    .flag = 0};
	int ret_len = 0;
	struct nf_conn *ct;

	while(1) {
		struct T_zvnet_rpmsg_ctstat buff = {0};
		msg.buf = (void *)(&buff); /* destination buffer */
		msg.len = sizeof(struct T_zvnet_rpmsg_ctstat); /* bytes to read */
		ret_len = zvnetRead(&msg); /* read one node from the ring queue */

		if(ret_len <= 0) {
			/* Nothing available (or channel error): back off and retry. */
			zv_err("rpm read err=%d", ret_len);
			msleep(1000);
			continue;
		}
		/* A short read means the shared ring is corrupt; don't recover. */
		if(ret_len != sizeof(struct T_zvnet_rpmsg_ctstat)) {
			panic("err, ret:%d!!!!!!", ret_len);
		}
		ct = (struct nf_conn *)buff.cap_nfct;

		/* The remote side must still hold the reference it was given. */
		WARN_ON(atomic_read(&ct->ct_general.use) == 0);
		if(buff.flag){
			/* in/out are 1-based device indices; 0 means invalid.
			 * (Both fields are unsigned, so <= 0 is effectively == 0.) */
			BUG_ON(buff.in <= 0 || buff.out <= 0);
			/* Direction ORIGINAL: rx counted on 'in', tx on 'out'. */
			if(buff.pkt[0].pkt && buff.pkt[0].len){
				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[0]->name, ct->outdev[0]->name, buff.in, buff.out);
				zvnet_dev[buff.in-1].net->stats.rx_packets += buff.pkt[0].pkt;
				zvnet_dev[buff.in-1].net->stats.rx_bytes += buff.pkt[0].len;
				zvnet_dev[buff.out-1].net->stats.tx_packets += buff.pkt[0].pkt;
				zvnet_dev[buff.out-1].net->stats.tx_bytes += buff.pkt[0].len;
			}
			/* Direction REPLY: the device roles are swapped. */
			if(buff.pkt[1].pkt && buff.pkt[1].len){
				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[1]->name, ct->outdev[1]->name, buff.out, buff.in);
				zvnet_dev[buff.out-1].net->stats.rx_packets += buff.pkt[1].pkt;
				zvnet_dev[buff.out-1].net->stats.rx_bytes += buff.pkt[1].len;
				zvnet_dev[buff.in-1].net->stats.tx_packets += buff.pkt[1].pkt;
				zvnet_dev[buff.in-1].net->stats.tx_bytes += buff.pkt[1].len;
			}
			/* Per-conntrack counters are shared with the fast-forward
			 * path, hence the spinlock (BH-safe). */
			spin_lock_bh(&fast_fw_spinlock);
			ct->packet_info[IP_CT_DIR_ORIGINAL].bytes += buff.pkt[IP_CT_DIR_ORIGINAL].len;
			ct->packet_info[IP_CT_DIR_ORIGINAL].packets += buff.pkt[IP_CT_DIR_ORIGINAL].pkt;
			ct->packet_info[IP_CT_DIR_REPLY].bytes += buff.pkt[IP_CT_DIR_REPLY].len;
			ct->packet_info[IP_CT_DIR_REPLY].packets += buff.pkt[IP_CT_DIR_REPLY].pkt;
			spin_unlock_bh(&fast_fw_spinlock);
			zv_info("nf_update %x %d %d %d %d", buff.cap_nfct, buff.pkt[0].pkt, buff.pkt[0].len, buff.pkt[1].pkt, buff.pkt[1].len);
		}else{
			/* Remote core released the flow: drop our ct reference. */
			zv_info("nf_put %x", buff.cap_nfct);
			nf_conntrack_put(buff.cap_nfct);
		}
	}
	zv_err("The update thread exit!");
	return 0;
}
1128
1129/*******************************************************************************
1130 * Global function implementations *
1131 ******************************************************************************/
1132static int __init zvnet_init(void)
1133{
1134 int i;
1135 int err = -ENOMEM;
1136 struct zvnet *dev = NULL;
1137 struct net_device *net = NULL;
1138 struct zvnet_device *zvnetdev = NULL;
1139
1140#ifdef USE_ZVNET_PACKET
1141 skb_queue_head_init(&g_zvnet_skb_xmit_queue);
1142 spin_lock_init(&g_zvnet_free_lock);
1143 sema_init(&g_zvnet_free_sem, 0);
1144 sema_init(&g_zvnet_xmit_sem, 0);
1145#endif
1146 skb_queue_head_init(&g_zvnet_skb_queue);
1147 g_zvnet_chn_info.core_id = CORE_PS0;
1148 g_zvnet_chn_info.channel_id = ICP_CHN_ZVNET1;
1149 g_zvnet_chn_info.channel_size = ICP_CHANNEL_SIZE;
1150 for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
1151 zvnetdev = &zvnet_dev[i];
1152 memset(zvnetdev, 0, sizeof(struct zvnet_device));
1153 net = alloc_etherdev(sizeof(struct zvnet));
1154 if (!net) {
1155 zv_err("could not allocate device.\n");
1156 return err;
1157 }
1158
1159 net->needed_headroom += ZVNET_SKB_PAD;//NET_SKB_PAD;
1160 sprintf(net->name, "%s%d", ZVNET_IFNAME_PREFIX, i);
1161 dev = v2xnet_dev_init(net, zvnetdev);
1162 v2xnet_init_netdev(net);
1163 if(0 == i){
1164 net->flags = (net->flags & (~IFF_NOARP));
1165 }
1166 err = register_netdev(net);
1167 if (err) {
1168 zv_err("register_netdev error:%d :%d\n",err,i);
1169 return err;
1170 }
1171 zvnetdev->dev = dev;
1172 zvnetdev->net = net;
1173/*
1174 zvnetdev->chn_info.core_id = CAP_ID;
1175 zvnetdev->chn_info.channel_id = ICP_CHN_ZVNET1 + i;//zvnet_collect[i];
1176 zvnetdev->chn_info.channel_size = ICP_CHANNEL_SIZE;
1177 err = zvnet_channel_create(zvnetdev);
1178 if(0 != err) {
1179 zv_err("zvnet_channel_create error:%d :%d\n",err,i);
1180 goto out_unregister_netdev;
1181 }
1182*/
1183 }
1184 {
1185 struct task_struct *th = NULL;
1186 int retval = 0;
1187 retval = zvnet_createIcpChannel(CORE_PS0, 21, 64);
1188 if(retval < 0) {
1189 zv_err("Create IcpChannel channel_21 fail.");
1190 return retval;
1191 }
1192
1193 th = kthread_run(zvnet_update_thread, 0, "zvnet-update%d", 21);
1194 if (IS_ERR(th)) {
1195 zv_err("Unable to start update thread.");
1196 return PTR_ERR(th);
1197 }
1198 retval = zvnet_createIcpChannel(CORE_PS0, 20, ICP_CHANNEL_SIZE);
1199 if(retval < 0) {
1200 zv_err("Create IcpChannel channel_20 fail.");
1201 return retval;
1202 }
1203
1204 th = kthread_run(zvnet_receive_thread, 0, "zvnet-recv%d", 20);
1205 if (IS_ERR(th)) {
1206 zv_err("Unable to start receive thread.");
1207 return PTR_ERR(th);
1208 }
1209 g_zvnet_chn_info.rcv_thread = th;
1210
1211 retval = zvnet_createIcpChannel(CORE_PS0, 29, ICP_CHANNEL_SIZE);
1212 if(retval < 0) {
1213 zv_err("Create IcpChannel channel_29 fail.");
1214 return retval;
1215 }
1216
1217 th = kthread_run(zvnet_release_thread, 0, "zvnet-free%d", 29);
1218 if (IS_ERR(th)) {
1219 zv_err("Unable to start release thread.");
1220 return PTR_ERR(th);
1221 }
1222#ifdef USE_ZVNET_PACKET
1223 th = kthread_run(zvnet_xmit_warp_thread, 0, "zvnet-xmit-wrap");
1224 if (IS_ERR(th)) {
1225 zv_err("Unable to start xmit_warp thread.");
1226 return PTR_ERR(th);
1227 }
1228
1229 th = kthread_run(zvnet_free_warp_thread, 0, "zvnet-free-wrap");
1230 if (IS_ERR(th)) {
1231 zv_err("Unable to start free_warp thread.");
1232 return PTR_ERR(th);
1233 }
1234#endif
1235 vir_addr_ap = ioremap_cache(DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
1236 zv_warn("vir_addr_ap vir=0x%x phy=0x%x len=0x%x", vir_addr_ap, DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
1237 if(vir_addr_ap == NULL)
1238 {
1239 zv_err("AP mmap failed.\n");
1240 return -1;
1241 }
1242
1243 }
1244 zv_dbg("success.\n");
1245 return 0;
1246
1247}
1248
1249static void __exit zvnet_exit(void)
1250{
1251 int i;
1252 struct net_device *net;
1253
1254 for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
1255 net = zvnet_dev[i].net;
1256 unregister_netdev(net);
1257 free_netdev(net);
1258 zvnet_dev[i].net = NULL;
1259 }
1260 zv_warn("success.\n");
1261}
1262
1263late_initcall(zvnet_init);
1264module_exit(zvnet_exit);
1265
1266MODULE_AUTHOR("ZXIC");
1267MODULE_DESCRIPTION("ZXIC CAP LAN NET DEVICE");
1268MODULE_LICENSE("GPL");
1269