xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 1 | /*******************************************************************************
|
| 2 | * Include header files *
|
| 3 | ******************************************************************************/
|
| 4 | #include <linux/module.h>
|
| 5 | #include <linux/etherdevice.h>
|
| 6 | #include <net/sock.h>
|
| 7 | #include <uapi/linux/sched/types.h>
|
| 8 | #include "zvnet_dev.h"
|
| 9 | #include "ram_config.h"
|
| 10 | #include <net/netfilter/nf_conntrack.h>
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 11 | #include <net/SI/fast_common.h>
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 12 | /*******************************************************************************
|
| 13 | * Macro definitions *
|
| 14 | ******************************************************************************/
|
| 15 | #define USE_ZVNET_PACKET
|
| 16 |
|
| 17 | #define WATCHDOG_TIMEO (5*HZ)
|
| 18 | #define XMIT_RETRANS_TIMES 3
|
| 19 | #define ZVNET_SKB_PAD 128
|
| 20 | #define ZVNET_TMP_BUFF_LEN 2048
|
| 21 | #define ZVNET_FREE_BUFF_NUM 256
|
| 22 | #define ZVNET_XMIT_BUFF_NUM 64
|
| 23 | #define ZVNET_XMIT_MAX_QUEUE_NUM 2048
|
| 24 |
|
| 25 | /*******************************************************************************
|
| 26 | * Type definitions *
|
| 27 | ******************************************************************************/
|
//Packet descriptor info passed from AP to CAP; the content is written into shared DDR
|
/*
 * Per-packet descriptor exchanged between the AP and CAP cores through the
 * shared DDR window.  Layout must stay in sync with the CAP side.
 */
struct T_zvnet_rpmsg
{
	void *buff;//skb_head pointer, passed across cores on free so the owner can release it quickly
	void *head;//address boundary the peer core may use; must not be exceeded or memory overflows (physical address)
	unsigned short data_off;//offset from head to the packet start (MAC frame header)
	unsigned short len;//valid packet length, normally the MAC frame length
	unsigned short end_off;//end offset
	unsigned char dev;//cid 1->8
	unsigned char flag;//0 normal packet, 1 re-forwarded packet, 2 fast-path re-forwarded packet
};
|
/* Packet/byte counter pair used inside the shared CT stats record below. */
struct T_zvnet_pkt_stats
{
	unsigned int pkt;	/* packet count */
	unsigned int len;	/* byte count -- presumably accumulated lengths; TODO confirm against CAP side */
};
|
//Conntrack (CT) info passed from AP to CAP; the content is written into shared DDR
|
/*
 * Conntrack state record shared with the CAP core via DDR.
 * Layout must stay in sync with the CAP side.
 */
struct T_zvnet_rpmsg_ctstat
{
	void *cap_nfct;	/* NOTE(review): presumably the CAP-side nf_conntrack handle -- confirm */
	unsigned char in;
	unsigned char out;
	unsigned short flag;
	struct T_zvnet_pkt_stats pkt[2];	/* NOTE(review): presumably per-direction counters (in/out) -- confirm */
};
|
| 53 | /*******************************************************************************
|
| 54 | * Local variable definitions *
|
| 55 | ******************************************************************************/
|
| 56 | struct zvnet_device zvnet_dev[DDR_ZVNET_DEV_MAX];
|
| 57 | int *vir_addr_ap = NULL;
|
| 58 | struct sk_buff_head g_zvnet_skb_queue;
|
| 59 | struct zvnet_channel g_zvnet_chn_info;
|
| 60 |
|
| 61 | #ifdef USE_ZVNET_PACKET
|
| 62 | void *g_zvnet_free_buff[ZVNET_FREE_BUFF_NUM];
|
| 63 | int g_zvnet_free_num;
|
| 64 | spinlock_t g_zvnet_free_lock;
|
| 65 | struct semaphore g_zvnet_free_sem;
|
| 66 | struct semaphore g_zvnet_xmit_sem;
|
| 67 | struct sk_buff_head g_zvnet_skb_xmit_queue;
|
| 68 |
|
| 69 | unsigned int g_wrap_packet_size = 1000;
|
| 70 | module_param(g_wrap_packet_size, int, 0644);
|
| 71 | unsigned int g_wrap_num = 10;
|
| 72 | module_param(g_wrap_num, int, 0644);
|
| 73 | unsigned int g_wrap_timeout = 10;
|
| 74 | module_param(g_wrap_timeout, int, 0644);
|
xf.li | 1867bfa | 2024-08-20 02:32:16 -0700 | [diff] [blame] | 75 | unsigned int g_trace_limit = 0;
|
| 76 | module_param(g_trace_limit, int, 0644);
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 77 | #endif
|
| 78 |
|
| 79 | /*******************************************************************************
|
| 80 | * Global variable definitions *
|
| 81 | ******************************************************************************/
|
| 82 | extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
|
| 83 | extern void v7_dma_map_area(const void *, size_t, int);
|
| 84 | extern void *get_ct_for_ap(struct sk_buff *skb);
|
| 85 | extern void put_ct_for_ap(void *ct);
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 86 | /*******************************************************************************
|
| 87 | * Local function declarations *
|
| 88 | ******************************************************************************/
|
| 89 | static int zvnet_open(struct net_device *net);
|
| 90 | static int zvnet_close(struct net_device *net);
|
| 91 | static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net);
|
| 92 | static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue);
|
| 93 | static struct net_device_stats *zvnet_get_stats(struct net_device *net);
|
| 94 | static void v2xnet_init_netdev(struct net_device *net);
|
| 95 | static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb);
|
| 96 | static void zvnet_bh (unsigned long param);
|
| 97 | static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev);
|
| 98 |
|
| 99 | static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len);
|
| 100 | static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len);
|
| 101 | static int zvnet_channel_clear(struct zvnet_channel *chninfo);
|
| 102 | static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp);
|
| 103 | static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo);
|
| 104 | static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev);
|
| 105 | static int zvnet_receive_thread(void *argv);
|
| 106 | static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size);
|
| 107 | static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size);
|
| 108 | static int zvnet_channel_create(struct zvnet_device *zvnetdev);
|
| 109 |
|
| 110 | /*******************************************************************************
|
| 111 | * Local function implementations *
|
| 112 | ******************************************************************************/
|
| 113 |
|
| 114 | unsigned long virt_to_phys_ap(unsigned long virt)
|
| 115 | {
|
| 116 | if(virt >= (unsigned long)vir_addr_ap && virt <= ((unsigned long)vir_addr_ap+DDR_BASE_LEN_AP))
|
| 117 | return DDR_BASE_ADDR_AP + (virt - (unsigned long)vir_addr_ap);
|
| 118 | return NULL;
|
| 119 | }
|
| 120 |
|
| 121 | unsigned long phys_to_virt_ap(unsigned long phys)
|
| 122 | {
|
| 123 | if(phys >= DDR_BASE_ADDR_AP && phys <= (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP))
|
| 124 | return (unsigned long)vir_addr_ap + (phys - DDR_BASE_ADDR_AP);
|
| 125 | return NULL;
|
| 126 | }
|
| 127 |
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 128 | /* Started by AICoder, pid:2fa080381bb2e3d14fbc0aa44091291a60d78e35 */
|
/*
 * Debug sanity check for an skb that is about to be freed/handled.
 * Panics when (a) skb->capHead and the AP-window test on skb->head disagree
 * (an AP-buffer skb must have its head inside the shared window, and vice
 * versa), or (b) the skb is already present on g_zvnet_skb_queue, which
 * would indicate a double free.
 *
 * NOTE(review): g_zvnet_skb_queue is walked without taking the queue lock
 * here -- presumably tolerable for a debug-only check; confirm callers.
 */
void check_skb_test(struct sk_buff *skb)
{
	if (skb && vir_addr_ap) {
		struct sk_buff *tmp_skb;
		/* capHead and AP-window membership must agree */
		if ((skb->capHead && (virt_to_phys_ap(skb->head) == NULL))
			|| ((skb->capHead == NULL) && virt_to_phys_ap(skb->head))) {
			dump_stack();
			panic("capHead err");
		}
		/* an skb still queued for the peer must never be freed */
		skb_queue_walk(&g_zvnet_skb_queue, tmp_skb) {
			if (tmp_skb == skb) {
				dump_stack();
				panic("dup free");
			}
		}
	}
}
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 146 | /* Ended by AICoder, pid:2fa080381bb2e3d14fbc0aa44091291a60d78e35 */
|
| 147 |
|
| 148 | /* Started by AICoder, pid:z5702yf8bad07ad1448a083e806dc31250b2418f */
|
/*
 * Hex-dump up to min(len, limit_len) bytes of a packet, 16 bytes per line.
 *
 * Fixes vs. previous version:
 *  - the format string carried only 15 "%02x" conversions for 16 byte
 *    arguments, so the 16th byte of every line was silently dropped;
 *  - a full 16-byte line was always read, running past the end of the
 *    buffer when the dump length was not a multiple of 16.
 */
void zvnet_dump_packet(unsigned char *data, int len, int limit_len)
{
	int i, j, n;
	char line[3 * 16 + 1];

	n = (len < limit_len) ? len : limit_len;
	for (i = 0; i < n; i += 16) {
		int chunk = (n - i < 16) ? (n - i) : 16;
		char *pos = line;

		for (j = 0; j < chunk; j++)
			pos += sprintf(pos, " %02x", data[i + j]);
		printk("0x%04x:%s\n", i, line);
	}
}
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 161 | /* Ended by AICoder, pid:z5702yf8bad07ad1448a083e806dc31250b2418f */
|
| 162 |
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 163 | int zvnet_get_index_by_netdev(struct net_device *net)
|
| 164 | {
|
| 165 | int i;
|
| 166 |
|
| 167 | for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
|
| 168 | if(zvnet_dev[i].net == net)
|
| 169 | return i;
|
| 170 | }
|
| 171 | return -1;
|
| 172 | }
|
| 173 | #ifdef USE_ZVNET_PACKET
|
/*
 * Flush the batch of CAP buffer pointers staged in g_zvnet_free_buff[] to
 * the CAP core over rpmsg channel 29 so the peer can free them.
 * The staging array is snapshotted under g_zvnet_free_lock so the rpmsg
 * write itself happens without the spinlock held.
 */
void write_free_apbuf_packet(void)
{
	int ret,size;
	unsigned long flags;
	void *buf[ZVNET_FREE_BUFF_NUM];

	spin_lock_irqsave(&g_zvnet_free_lock, flags);
	if(g_zvnet_free_num == 0){
		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
		return;
	}
	/* byte size of the snapshot: each entry is a 32-bit pointer (num * 4) */
	size = (g_zvnet_free_num << 2);
	memcpy(buf, g_zvnet_free_buff, size);
	g_zvnet_free_num = 0;
	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
	{
		T_RpMsg_Msg msg = { .coreID = CORE_PS0,
							.chID = 29,
							.flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
		/* sanity: the snapshot can never exceed the staging array */
		if((size >> 2) > ZVNET_FREE_BUFF_NUM)
			panic("free packet err");
		msg.buf = &buf;
		msg.len = size;
		zv_info("tofree size=%d", size);
		ret = zvnetWrite(&msg);
		if(ret < 0)
			panic("err, ret:%d!!!!!!", ret);
	}
}
|
| 203 | #endif
|
//Write the capbuf pointer handed over by the CAP side into DDR and notify the CAP side via ICP
|
/*
 * Queue one CAP-owned buffer pointer for return to the CAP core.
 *
 * USE_ZVNET_PACKET build: pointers are batched in g_zvnet_free_buff[]
 * under g_zvnet_free_lock.  When the staging array fills, a snapshot is
 * flushed immediately over rpmsg channel 29 (lock dropped first); when it
 * passes the g_wrap_num watermark, the flush thread is kicked via
 * g_zvnet_free_sem instead.
 *
 * Non-batched build: the single pointer is written to channel 29 directly.
 */
void write_free_apbuf(void *head)
{
#ifdef USE_ZVNET_PACKET
	unsigned long flags;

	zv_info("g_zvnet_free_num=%d skb=0x%x", g_zvnet_free_num, head);
	spin_lock_irqsave(&g_zvnet_free_lock, flags);
	g_zvnet_free_buff[g_zvnet_free_num] = head;
	g_zvnet_free_num++;
	if(g_zvnet_free_num == ZVNET_FREE_BUFF_NUM){
		int size;
		void *buf[ZVNET_FREE_BUFF_NUM];

		/* staging array is full: snapshot and flush right now */
		size = (g_zvnet_free_num << 2);
		memcpy(buf, g_zvnet_free_buff, size);
		g_zvnet_free_num = 0;
		spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
		if((size >> 2) > ZVNET_FREE_BUFF_NUM)
			panic("free packet err");
		{
			int ret;
			T_RpMsg_Msg msg = { .coreID = CORE_PS0,
								.chID = 29,
								.flag = RPMSG_WRITE_INT|RPMSG_WRITE_IRQLOCK};
			msg.buf = &buf;
			msg.len = size;
			if(printk_ratelimit())
				zv_warn("tofree quick size=%d", size);
			ret = zvnetWrite(&msg);
			if(ret < 0)
				panic("err, ret:%d!!!!!!", ret);
		}
		return;
	}
	/* past the watermark: wake the flush thread, it drains the batch */
	if(g_zvnet_free_num > g_wrap_num)
		up(&g_zvnet_free_sem);
	if(g_zvnet_free_num > ZVNET_FREE_BUFF_NUM)
		panic("free_buff err");
	spin_unlock_irqrestore(&g_zvnet_free_lock, flags);
#else
	int ret = 0;
	long buf = (long)head;

	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
						.chID = 29,
						.flag = RPMSG_WRITE_INT,
						.buf = NULL,
						.len = 4 };
	msg.buf = &buf;
	zv_info("tofree 0x%x", head);
	ret = zvnetWrite(&msg);
	if(ret < 0)
		panic("err, ret:%d!!!!!!", ret);
#endif
}
|
| 260 |
|
/*
 * Build a local sk_buff around a packet the CAP core placed in the shared
 * AP DDR window, described by *pbuf_temp.  Only an skb head structure is
 * allocated; the data buffer stays in shared DDR (skb->capHead records the
 * CAP-side handle so the buffer can be returned via write_free_apbuf()).
 * Returns the skb, or NULL on allocation failure (the CAP buffer is then
 * returned to the peer).  Out-of-window addresses or a bad device index
 * panic the system.
 */
struct sk_buff *skb_build_apbuf(struct T_zvnet_rpmsg *pbuf_temp)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	zv_info("build 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
	/* the descriptor must point inside the shared AP window */
	if((unsigned long )pbuf_temp->head < DDR_BASE_ADDR_AP || (unsigned long )pbuf_temp->head > (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)){
		zv_err("err 0x%x 0x%x %d %d", pbuf_temp->buff, pbuf_temp->head, pbuf_temp->data_off, pbuf_temp->len);
		panic("addr is not APBUF mem!!!");
		return NULL;
	}
	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
	{
		/* no skb head: hand the CAP buffer straight back to the peer */
		write_free_apbuf(pbuf_temp->buff);
		zv_err("alloc fail");
		return NULL;
	}
	memset(skb, 0, offsetof(struct sk_buff, tail));

	// check whether this is an apbuff
	if(pbuf_temp->dev < 0 || pbuf_temp->dev >= DDR_ZVNET_DEV_MAX)
		panic("dev index error!!!");
	skb->head = phys_to_virt_ap((unsigned long )pbuf_temp->head);
	skb->data = skb->head + pbuf_temp->data_off;
	skb->capHead = pbuf_temp->buff;
	// cache-line alignment needs to be considered
	skb->truesize = SKB_TRUESIZE(skb->data - skb->head + pbuf_temp->len);// in principle usable memory remains at the tail; not accounted for yet; SKB_DATA_ALIGN

	refcount_set(&skb->users, 1);
	skb_reset_tail_pointer(skb);
	skb->tail += pbuf_temp->len;
	skb->len = pbuf_temp->len;
	skb->end = skb->head + pbuf_temp->end_off;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->dev = zvnet_dev[pbuf_temp->dev].net;
	/* NOTE(review): 2000 looks like an MTU-derived sanity cap -- confirm */
	if(skb->len == 0 || skb->len > 2000)
		panic("len ERR!!!!!!!!!!\n");
	/* invalidate cache lines the CAP core wrote before the CPU reads them */
	v7_dma_map_area(skb->head, sizeof(struct skb_shared_info) + pbuf_temp->end_off, DMA_FROM_DEVICE);
	if(IFF_NOARP & skb->dev->flags)
		memcpy(skb->data, skb->dev->dev_addr, 6);
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	/* make sure we initialize shinfo sequentially */
	skb_reset_network_header(skb);
	skb_set_kcov_handle(skb, kcov_common_handle());
	if(unlikely(g_trace_limit > 0)){
		printk("-%s-dump_packet-start-%d\n", skb->dev->name, skb->len);
		zvnet_dump_packet(skb->data, skb->len, g_trace_limit);
		printk("-%s-dump_packet-end-\n", skb->dev->name);
	}
	return skb;
}
|
| 314 |
|
/*
 * Legacy MTU-change hook (the in-tree helper of the same name was removed
 * from newer kernels, hence the local copy).  Applies the new MTU verbatim.
 * NOTE(review): no range validation here -- presumably the core's
 * dev->min_mtu/max_mtu checks run before ndo_change_mtu; confirm.
 */
int eth_change_mtu(struct net_device *dev, int new_mtu)
{
	netdev_warn(dev, "%s is deprecated!\n", __func__);
	dev->mtu = new_mtu;
	return 0;
}
|
| 321 |
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 322 | /* Started by AICoder, pid:b001dtf2551fd53146790a57201be3321cf0a682 */
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 323 | static void skb_debug_test(struct sk_buff *skb)
|
| 324 | {
|
| 325 | int i;
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 326 | int vcount = skb->len / 10;
|
| 327 | int rcount = skb->len % 10;
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 328 | char tmp[64] = {0};
|
| 329 | char strbuf[64] = {0};
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 330 | const unsigned char *data = skb->data;
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 331 |
|
| 332 | zv_info("\n");
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 333 | for (i = 0; i < vcount; i++) {
|
| 334 | zv_info("%d---%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n", i,
|
| 335 | data[0 + 10 * i], data[1 + 10 * i], data[2 + 10 * i], data[3 + 10 * i],
|
| 336 | data[4 + 10 * i], data[5 + 10 * i], data[6 + 10 * i], data[7 + 10 * i],
|
| 337 | data[8 + 10 * i], data[9 + 10 * i]);
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 338 | }
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 339 | if (vcount > 0) {
|
| 340 | memset(tmp, 0, sizeof(tmp));
|
| 341 | sprintf(strbuf, "%d---", vcount);
|
| 342 | char *p = strbuf + strlen(strbuf);
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 343 |
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 344 | for (i = 0; i < rcount; i++) {
|
| 345 | sprintf(p, "%02x,", data[10 * vcount + i]);
|
| 346 | p += strlen(p);
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 347 | }
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 348 | *(p - 1) = '\0'; // ÒÆ³ý×îºóÒ»¸ö¶ººÅ
|
| 349 | zv_info("%s ", strbuf);
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 350 | }
|
| 351 | zv_info("\n");
|
| 352 | }
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 353 | /* Ended by AICoder, pid:b001dtf2551fd53146790a57201be3321cf0a682 */
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 354 |
|
| 355 | static int zvnet_open(struct net_device *net)
|
| 356 | {
|
| 357 | struct zvnet *dev = netdev_priv(net);
|
| 358 |
|
| 359 | if(net->flags & IFF_UP) {
|
| 360 | zv_dbg("%s has been opened!", dev->net->name);
|
| 361 | return -EBUSY;
|
| 362 | }
|
| 363 | netif_start_queue (net);
|
| 364 |
|
| 365 | return 0;
|
| 366 | }
|
| 367 |
|
| 368 | static int zvnet_close(struct net_device *net)
|
| 369 | {
|
| 370 | struct zvnet *dev = netdev_priv(net);
|
| 371 |
|
| 372 | zv_info("%s", dev->net->name);
|
| 373 | netif_stop_queue(net);
|
| 374 | tasklet_kill (&dev->bh);
|
| 375 |
|
| 376 | return 0;
|
| 377 | }
|
| 378 | #ifdef USE_ZVNET_PACKET
|
/*
 * Drain g_zvnet_skb_xmit_queue: translate every queued skb into a
 * T_zvnet_rpmsg descriptor and push the descriptors to the CAP core over
 * rpmsg channel 20 in batches of ZVNET_XMIT_BUFF_NUM.
 *
 * CAP-owned buffers (skb->capHead set) are unlinked and their skb heads
 * freed immediately -- ownership of the data already lies with the peer.
 * Locally-owned skbs are kept alive by splicing them onto
 * g_zvnet_skb_queue until the peer returns them.  On a failed write the
 * affected batch is unwound: local skbs are unlinked and freed, CAP
 * buffers (and any conntrack reference) are handed back to the peer.
 *
 * buff[] is static (up to 2049 descriptors) -- relies on this function
 * never running concurrently with itself.
 */
static void zvnet_xmit_packet(void)
{
	int i,j,k,ret,num;
	unsigned long flags;
	unsigned long flags1;
	struct sk_buff *skb, *tmp;
	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
						.chID = 20,
						.flag = RPMSG_WRITE_INT};
	static struct T_zvnet_rpmsg buff[ZVNET_XMIT_MAX_QUEUE_NUM+1];

	spin_lock_irqsave(&g_zvnet_skb_xmit_queue.lock, flags);
	if (skb_queue_empty(&g_zvnet_skb_xmit_queue)) {
		spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
		return;
	}
	i = 0;
	skb_queue_walk_safe(&g_zvnet_skb_xmit_queue, skb, tmp) {
		//buff[i].buff = skb;
		buff[i].data_off = skb->data - skb->head;
		//buff[i].head = virt_to_phys(skb->head);
		buff[i].len = skb->len;
		buff[i].end_off = skb->end - skb->head;
		buff[i].dev = zvnet_get_index_by_netdev(skb->dev);
		if(skb->capHead){
			/* data lives in a CAP buffer: peer keeps ownership, drop the skb head now */
			buff[i].buff = skb->capHead;
#ifdef CONFIG_FASTNAT_MODULE
			if(skb->isFastnat){
				buff[i].head = get_ct_for_ap(skb);
				buff[i].flag = 2;
			}else
#endif
			{
				buff[i].head = NULL;
				buff[i].flag = 1;
			}
			__skb_unlink(skb, &g_zvnet_skb_xmit_queue);
			kfree_skb(skb);
		}else{
			/* locally-owned skb: pass its physical head, keep it queued until returned */
			buff[i].buff = skb;
			buff[i].head = virt_to_phys(skb->head);
			buff[i].flag = 0;
		}
		i++;
		zv_info("xmit skb=0x%x i=%d", skb, i);
		if(i > ZVNET_XMIT_MAX_QUEUE_NUM){
			panic("qlen:%d!", i);
			break;
		}
	}
	/* move the surviving (local) skbs onto the in-flight queue; note the
	 * nested lock order: xmit_queue.lock outside, skb_queue.lock inside */
	spin_lock_irqsave(&g_zvnet_skb_queue.lock, flags1);
	skb_queue_splice_tail_init(&g_zvnet_skb_xmit_queue, &g_zvnet_skb_queue);
	spin_unlock_irqrestore(&g_zvnet_skb_queue.lock, flags1);
	spin_unlock_irqrestore(&g_zvnet_skb_xmit_queue.lock, flags);
	zv_info("g_zvnet_skb_queue.qlen=%d i=%d", g_zvnet_skb_queue.qlen, i);
	for(j = 0; j < i; j = j + ZVNET_XMIT_BUFF_NUM){
		if(i <= (j + ZVNET_XMIT_BUFF_NUM)){
			msg.buf = (void *)&buff[j];
			msg.len = sizeof(struct T_zvnet_rpmsg)*(i-j);/* final batch drains the rest */
			ret = zvnetWrite(&msg);
		}else{
			msg.buf = (void *)&buff[j];
			msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM;
			ret = zvnetWrite(&msg);
		}
		zv_info("xmit write ret=%d size=%d i=%d j=%d", ret, msg.len, i, j);
		if(ret < 0) {
			/* write failed: unwind ownership for every descriptor of this batch */
			if(printk_ratelimit())
				zv_warn("zvnet_channel_write ret=%d fail.",ret);
			num = msg.len / sizeof(struct T_zvnet_rpmsg);
			for(k = j; k < j+num; k++){
				if(buff[k].flag == 0){
					skb = (struct sk_buff *)buff[k].buff;
					skb_unlink(skb, &g_zvnet_skb_queue);
					skb->isToap = 0;
					kfree_skb(skb);
				}else{
					if(buff[k].head)
						put_ct_for_ap(buff[k].head);
					write_free_apbuf(buff[k].buff);
				}
			}
		}
	}
}
|
| 464 | #endif
|
| 465 | static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net)
|
| 466 | {
|
| 467 | #ifdef USE_ZVNET_PACKET
|
| 468 | struct sk_buff *data = NULL;
|
| 469 |
|
| 470 | //zv_info("g_zvnet_skb_xmit_queue.qlen=%d", g_zvnet_skb_xmit_queue.qlen);
|
| 471 | if(g_zvnet_skb_xmit_queue.qlen >= ZVNET_XMIT_MAX_QUEUE_NUM){
|
| 472 | net->stats.tx_errors++;
|
| 473 | net->stats.tx_dropped++;
|
| 474 | zv_err("write err, qlen:%d!", g_zvnet_skb_xmit_queue.qlen);
|
| 475 | kfree_skb(skb);
|
| 476 | return NET_XMIT_SUCCESS;
|
| 477 | }
|
| 478 |
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 479 | if(unlikely(skb->next//|| skb->capHead || skb_headroom(skb) < NET_SKB_PAD
|
| 480 | || skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags) || skb->sk || (skb->indev == NULL)
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 481 | || (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
|
| 482 | int ret_len = skb->len;
|
| 483 |
|
| 484 | data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
|
| 485 | if (unlikely(!data)) {
|
| 486 | zv_err("dev_alloc_skb fail,len %d",ret_len);
|
| 487 | net->stats.tx_errors++;
|
| 488 | net->stats.tx_dropped++;
|
| 489 | kfree_skb(skb);
|
| 490 | return NET_XMIT_SUCCESS;
|
| 491 | }
|
| 492 | skb_put(data,ret_len);
|
| 493 | skb_reserve(data, NET_IP_ALIGN);
|
| 494 | memcpy(data->data, skb->data, ret_len);
|
| 495 | zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
|
| 496 | kfree_skb(skb);
|
| 497 | }else{
|
| 498 | data = skb;
|
| 499 | }
|
| 500 | data->dev = net;
|
| 501 | data->isToap = 1;
|
| 502 | v7_dma_map_area(data->head, data->end - data->head + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
|
| 503 | skb_queue_tail(&g_zvnet_skb_xmit_queue, data);
|
| 504 | if(data->len < g_wrap_packet_size || g_zvnet_skb_xmit_queue.qlen > g_wrap_num)
|
| 505 | up(&g_zvnet_xmit_sem);
|
| 506 | net->stats.tx_packets++;
|
| 507 | net->stats.tx_bytes += skb->len;
|
| 508 | #else
|
| 509 | struct zvnet *dev = netdev_priv(net);
|
| 510 | struct zvnet_device *zvnetdev = (struct zvnet_device *)dev->dev_priv;
|
| 511 | int ret = 0;
|
| 512 | struct zvp_header hzvp;
|
| 513 |
|
| 514 | if (!skb) {
|
| 515 | zv_err("err: skb == 0!");
|
| 516 | }
|
| 517 | #if 0
|
| 518 | if (skb->len > ZVNET_TMP_BUFF_LEN) {
|
| 519 | zv_err("err: skb->len(%d)>%d!", skb->len, ZVNET_TMP_BUFF_LEN);
|
| 520 | }
|
| 521 |
|
| 522 | send_header:
|
| 523 | ret = zvnet_channel_write(&(zvnetdev->chn_info), skb->data, skb->len);
|
| 524 |
|
| 525 | if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
|
| 526 | zvnetdev->retran_times ++;
|
| 527 | zv_warn("The retran_times is %d.",zvnetdev->retran_times);
|
| 528 | goto send_header;
|
| 529 | }
|
| 530 |
|
| 531 | if (ret >= 0) {
|
| 532 | net->stats.tx_packets++;
|
| 533 | net->stats.tx_bytes += skb->len;
|
| 534 | } else {
|
| 535 | net->stats.tx_errors++;
|
| 536 | net->stats.tx_dropped++;
|
| 537 | zv_err("write err, ret:%d!", ret);
|
| 538 | }
|
| 539 |
|
| 540 | exit:
|
| 541 | kfree_skb(skb);
|
| 542 | #else
|
| 543 | struct T_zvnet_rpmsg buff = {0};
|
| 544 | struct sk_buff *data = NULL;
|
| 545 | if(unlikely(skb_headroom(skb) < NET_SKB_PAD || skb->capHead || skb->next
|
| 546 | || skb->fclone || skb->cloned || (skb_shinfo(skb)->nr_frags)
|
| 547 | || (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) || (skb_has_frag_list(skb)))){
|
| 548 | int ret_len = skb->len;
|
| 549 |
|
| 550 | data = dev_alloc_skb(ret_len + NET_IP_ALIGN);
|
| 551 | if (unlikely(!data)) {
|
| 552 | zv_err("dev_alloc_skb fail,len %d",ret_len);
|
| 553 | net->stats.tx_errors++;
|
| 554 | net->stats.tx_dropped++;
|
| 555 | kfree_skb(skb);
|
| 556 | return NET_XMIT_SUCCESS;
|
| 557 | }
|
| 558 | skb_put(data,ret_len);
|
| 559 | skb_reserve(data, NET_IP_ALIGN);
|
| 560 | memcpy(data->data, skb->data, ret_len);
|
| 561 | data->isToap = 1;
|
| 562 | buff.buff = data;
|
| 563 | buff.data_off = data->data - data->head;
|
| 564 | buff.head = virt_to_phys(data->head);
|
| 565 | buff.len = ret_len;
|
| 566 | buff.end_off = data->end - data->head;
|
| 567 | buff.dev = zvnet_get_index_by_netdev(net);
|
| 568 | zv_info("alloc 0x%x 0x%x %d %d", buff.buff, buff.head, buff.data_off, buff.len);
|
| 569 | zv_info("ap=0x%x next=0x%x clone=%d nr_frags=%d tx_flags=%d frag_list=0x%x", skb->capHead, skb->next, skb->cloned, (skb_shinfo(skb)->nr_frags), skb_shinfo(skb)->tx_flags, skb_shinfo(skb)->frag_list);
|
| 570 | v7_dma_map_area(data->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
|
| 571 | }else{
|
| 572 | skb->isToap = 1;
|
| 573 | buff.buff = skb;
|
| 574 | buff.data_off = skb->data - skb->head;
|
| 575 | buff.head = virt_to_phys(skb->head);
|
| 576 | buff.len = skb->len;
|
| 577 | buff.end_off = skb->end - skb->head;
|
| 578 | buff.dev = zvnet_get_index_by_netdev(net);
|
| 579 | zv_info("transfer 0x%x %d 0x%x %d", buff.buff, buff.head, buff.data_off, buff.len);
|
| 580 | v7_dma_map_area(skb->head, buff.end_off + sizeof(struct skb_shared_info), DMA_TO_DEVICE);
|
| 581 | }
|
| 582 | send_header:
|
| 583 | ret = zvnet_channel_write(&g_zvnet_chn_info, &buff, sizeof(struct T_zvnet_rpmsg));
|
| 584 |
|
| 585 | if((ret < 0) && (zvnetdev->retran_times < XMIT_RETRANS_TIMES)) {
|
| 586 | zvnetdev->retran_times ++;
|
| 587 | zv_warn("The retran_times is %d.",zvnetdev->retran_times);
|
| 588 | goto send_header;
|
| 589 | }
|
| 590 |
|
| 591 | if (ret >= 0) {
|
| 592 | net->stats.tx_packets++;
|
| 593 | net->stats.tx_bytes += skb->len;
|
| 594 | if(data){
|
| 595 | kfree_skb(skb);
|
| 596 | skb_queue_tail(&g_zvnet_skb_queue, data);
|
| 597 | }else
|
| 598 | skb_queue_tail(&g_zvnet_skb_queue, skb);
|
| 599 | zvnetdev->retran_times = 0;
|
| 600 | } else {
|
| 601 | net->stats.tx_errors++;
|
| 602 | net->stats.tx_dropped++;
|
| 603 | zv_err("write err, ret:%d!", ret);
|
| 604 | if(data){
|
| 605 | data->isToap = 0;
|
| 606 | kfree_skb(data);
|
| 607 | }
|
| 608 | else
|
| 609 | skb->isToap = 0;
|
| 610 | kfree_skb(skb);
|
| 611 | }
|
| 612 | #endif
|
| 613 | #endif
|
| 614 | return NET_XMIT_SUCCESS;
|
| 615 | }
|
| 616 |
|
/* Called by the kernel when a transmit times out (watchdog_timeo expired):
 * count the error and restart the queue so traffic can resume. */
static void zvnet_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	zv_warn("sent timeout!");
	net->stats.tx_errors++;
	netif_wake_queue(net);
}
|
| 624 |
|
/* ndo_get_stats: expose the counters accumulated directly in net->stats. */
static struct net_device_stats *zvnet_get_stats(struct net_device *net)
{
	return &net->stats;
}
|
| 629 |
|
/* net_device operations table installed by v2xnet_init_netdev(). */
const struct net_device_ops zvnet_netdev_ops = {
	.ndo_open = zvnet_open,
	.ndo_stop = zvnet_close,
	.ndo_start_xmit = zvnet_xmit,
	.ndo_tx_timeout = zvnet_tx_timeout,
	.ndo_get_stats = zvnet_get_stats,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
|
| 640 |
|
| 641 | static void v2xnet_init_netdev(struct net_device *net)
|
| 642 | {
|
| 643 | u8 node_id [ETH_ALEN];
|
| 644 |
|
| 645 | random_ether_addr(node_id);
|
| 646 | memcpy (net->dev_addr, node_id, sizeof node_id);
|
| 647 |
|
| 648 | net->netdev_ops = &zvnet_netdev_ops;
|
| 649 | net->watchdog_timeo = WATCHDOG_TIMEO;
|
| 650 | net->flags |= IFF_NOARP;
|
| 651 | }
|
| 652 |
|
| 653 | static void zvnet_skb_return (struct zvnet *dev, struct sk_buff *skb)
|
| 654 | {
|
| 655 | int status;
|
| 656 |
|
| 657 | //zv_info("enter...");
|
| 658 |
|
| 659 | //skb->protocol = eth_type_trans(skb, dev->net);
|
| 660 |
|
| 661 | status = netif_rx (skb);
|
| 662 | if (status == NET_RX_SUCCESS) {
|
| 663 | dev->net->stats.rx_packets++;
|
| 664 | dev->net->stats.rx_bytes += skb->len;
|
| 665 | } else {
|
| 666 | dev->net->stats.rx_errors++;
|
| 667 | zv_err("netif_rx status %d.", status);
|
| 668 | }
|
| 669 | }
|
| 670 |
|
| 671 | static void zvnet_bh (unsigned long param)
|
| 672 | {
|
| 673 | struct zvnet *dev = (struct zvnet *)param;
|
| 674 | struct sk_buff *skb;
|
| 675 |
|
| 676 | while((skb = skb_dequeue(&dev->rxq)) != NULL) {
|
| 677 | if (skb->len)
|
| 678 | zvnet_skb_return(dev, skb);
|
| 679 | else {
|
| 680 | dev->net->stats.rx_errors++;
|
| 681 | dev_kfree_skb (skb);
|
| 682 | zv_err("drop!!!ddrnet_bh skb len == 0.");
|
| 683 | }
|
| 684 | }
|
| 685 | }
|
| 686 |
|
| 687 | static struct zvnet *v2xnet_dev_init(struct net_device *net, struct zvnet_device *zvnetdev)
|
| 688 | {
|
| 689 | struct zvnet *dev = NULL;
|
| 690 |
|
| 691 | dev = netdev_priv(net);
|
| 692 | if(!dev) {
|
| 693 | zv_err("dev is null.\n");
|
| 694 | return NULL;
|
| 695 | }
|
| 696 |
|
| 697 | dev->net = net;
|
| 698 | dev->bh.func = zvnet_bh;
|
| 699 | dev->bh.data = (unsigned long) dev;
|
| 700 |
|
| 701 | skb_queue_head_init (&dev->rxq);
|
| 702 |
|
| 703 | dev->dev_priv = zvnetdev;
|
| 704 |
|
| 705 | return dev;
|
| 706 | }
|
| 707 |
|
/* Return value >= 0 means the channel write succeeded; < 0 means it failed */
|
| 709 | static int zvnet_channel_write(struct zvnet_channel *chninfo, void *buf, unsigned int len)
|
| 710 | {
|
| 711 | T_RpMsg_Msg msg;
|
| 712 |
|
| 713 | if(NULL == buf) {
|
| 714 | return -EINVAL;
|
| 715 | }
|
| 716 | memset(&msg, 0, sizeof(msg));
|
| 717 | msg.coreID = chninfo->core_id;
|
| 718 | msg.chID = chninfo->channel_id;
|
| 719 | msg.flag |= RPMSG_WRITE_INT; //| RPMSG_WRITE_IRQLOCK;
|
| 720 | msg.buf = buf;
|
| 721 | msg.len = len;
|
| 722 |
|
| 723 | return zvnetWrite(&msg);
|
| 724 | }
|
| 725 |
|
/* Return value > 0 means the channel read succeeded; <= 0 means the channel is empty or the read failed */
|
| 727 | static int zvnet_channel_read(struct zvnet_channel *chninfo, void *buf, unsigned int len)
|
| 728 | {
|
| 729 | T_RpMsg_Msg msg;
|
| 730 | int ret = 0;
|
| 731 |
|
| 732 | if(NULL == buf) {
|
| 733 | return -EINVAL;
|
| 734 | }
|
| 735 |
|
| 736 | memset(&msg, 0, sizeof(msg));
|
| 737 | msg.coreID = chninfo->core_id;
|
| 738 | msg.chID = chninfo->channel_id;
|
| 739 | msg.buf = buf;
|
| 740 | msg.len = len;
|
| 741 |
|
| 742 | ret = zvnetRead(&msg);
|
| 743 | if (ret <= 0) {
|
| 744 | zv_err("rpm read err=%d!",ret);
|
| 745 | return ret;
|
| 746 | }
|
| 747 |
|
| 748 | return ret;
|
| 749 | }
|
| 750 |
|
| 751 | static int zvnet_channel_clear(struct zvnet_channel *chninfo)
|
| 752 | {
|
| 753 | char *tbuf = NULL;
|
| 754 | unsigned int tlen = chninfo->channel_size/2;
|
| 755 | int ret = 0;
|
| 756 |
|
| 757 | tbuf = (char *)kzalloc(tlen,GFP_ATOMIC);
|
| 758 | if(IS_ERR(tbuf)) {
|
| 759 | zv_err("kzalloc fail! %d byte.", tlen);
|
| 760 | return -ENOMEM;
|
| 761 | }
|
| 762 | ret = zvnet_channel_read(chninfo, tbuf, tlen);
|
| 763 | if(ret < 0) {
|
| 764 | zv_err("zvnet_channel_read fail!");
|
| 765 | ret = 0;
|
| 766 | }
|
| 767 | kfree(tbuf);
|
| 768 | zv_err("Drop channel data. %d byte.",ret);
|
| 769 |
|
| 770 | return ret;
|
| 771 | }
|
| 772 |
|
| 773 | static int zvnet_read_header(struct zvnet_channel *chninfo, struct zvp_header *phzvp)
|
| 774 | {
|
| 775 | return zvnet_channel_read(chninfo, phzvp, sizeof(struct zvp_header));
|
| 776 | }
|
| 777 |
|
| 778 | static struct sk_buff *zvnet_read_skb(struct zvnet_channel *chninfo, unsigned int tlen, struct zvnet *dev)
|
| 779 | {
|
| 780 | struct sk_buff *skb;
|
| 781 |
|
| 782 | if(NULL == chninfo || 0 >= tlen || NULL == dev) {
|
| 783 | return NULL;
|
| 784 | }
|
| 785 | skb = dev_alloc_skb(tlen);
|
| 786 | if (unlikely(!skb)) {
|
| 787 | zv_err("netdev_alloc_skb fail,len %d",tlen);
|
| 788 | return NULL;
|
| 789 | }
|
| 790 | skb_put(skb,tlen);
|
| 791 |
|
| 792 | if(zvnet_channel_read(chninfo, (void *)skb->data, tlen) != tlen) {
|
| 793 | zv_err("zvnet_channel_read fail.\n");
|
| 794 | kfree_skb(skb);
|
| 795 | return NULL;
|
| 796 | }
|
| 797 |
|
| 798 | zv_info("%s dev receive packet %d byte.",dev->net->name, tlen);
|
| 799 |
|
| 800 | skb->dev = dev->net;
|
| 801 |
|
| 802 | return skb;
|
| 803 | }
|
| 804 |
|
/*
 * Read one packet descriptor from the shared-memory channel and turn it
 * into an sk_buff.
 *
 * The active (#else) path reads a struct T_zvnet_rpmsg descriptor that
 * the peer core wrote into shared DDR and wraps the described buffer via
 * skb_build_apbuf() — no payload copy.  Returns the skb, or NULL on
 * failure (caller retries).  Note skb->dev is NOT set here; the caller
 * is expected to resolve the destination device.
 */
static struct sk_buff *zvnet_direct_read_skb(struct zvnet_channel *chninfo)
{
	struct sk_buff *skb;
#if 0
	/* Started by AICoder, pid:sd1cfsbc2eu87c41445f09652039f525fa147687 */
	int ret_len = 0;
	struct sk_buff *skb;

	ret_len = zvnet_channel_read(chninfo, NULL, 0); /* query the pending data length */
	if(ret_len <= 0) {
		zv_err("zvnet_channel_read fail.\n");
		return NULL;
	}

	skb = dev_alloc_skb(ret_len + ZVNET_SKB_PAD);
	if (unlikely(!skb)) {
		zv_err("netdev_alloc_skb fail,len %d",ret_len);
		return NULL;
	}

	ret_len = zvnet_channel_read(chninfo, skb->data, ret_len); /* copy the payload into skb->data */
	if(ret_len <= 0) {
		kfree_skb(skb); /* read failed: release the skb we just allocated */
		zv_err("zvnet_channel_read fail.\n");
		return NULL;
	}

	skb_put(skb,ret_len);
	skb_reserve(skb, ZVNET_SKB_PAD);
	/* Ended by AICoder, pid:sd1cfsbc2eu87c41445f09652039f525fa147687 */
#else
	struct T_zvnet_rpmsg buff = {0};
	int ret_len = 0;
	/* One fixed-size descriptor per read; any other length means the ring is corrupt. */
	ret_len = zvnet_channel_read(chninfo, (void *)&buff, sizeof(struct T_zvnet_rpmsg));

	if(ret_len <= 0) {
		zv_err("rpm read err=%d", ret_len);
		msleep(1000);	/* back off so an empty/dead channel does not busy-loop */
		return NULL;
	}
	if(ret_len != sizeof(struct T_zvnet_rpmsg)) {
		panic("err, ret:%d!!!!!!", ret_len);
	}
	skb = skb_build_apbuf(&buff);	/* zero-copy wrap of the shared-DDR buffer */
	if (unlikely(!skb)) {
		zv_err("netdev_alloc_skb fail,len %d",ret_len);
		return NULL;
	}
#endif
	//skb->dev = dev->net;
	return skb;
}
|
| 857 |
|
/*
 * Kernel thread: receive path.
 *
 * Blocks on rpmsg channel 20 towards CORE_PS0, pulls a batch of packet
 * descriptors (struct T_zvnet_rpmsg) written by the peer core, builds
 * skbs over the shared buffers with skb_build_apbuf(), and queues each
 * skb on the owning net device's rxq for the tasklet bottom half
 * (dev->bh) to deliver.  Runs forever; never returns in practice.
 */
static int zvnet_receive_thread(void *argv)
{
	//struct zvnet_device *zvnetdev = (struct zvnet_device *)argv;
	//struct zvnet_channel *chninfo = NULL;
	struct zvnet *dev = NULL;
	int index,ret_len,i,num;
	unsigned long flags;
	struct sk_buff *skb = NULL;
	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
						.chID = 20,
						.flag = 0};
	struct T_zvnet_rpmsg buff[ZVNET_XMIT_BUFF_NUM];
	//struct zvp_header hzvp;
	/*
	if(IS_ERR(zvnetdev)) {
		zv_err("The receive thread create fail!");
		return -EINVAL;
	}
	chninfo = &zvnetdev->chn_info;
	dev = zvnetdev->dev;
	*/
	while(1) {
		/*
		if(unlikely(!(zvnetdev->net->flags & IFF_UP))) {
			msleep(1000);
			continue;
		}
		*/
		//memset(&hzvp, 0, sizeof(hzvp));
#ifdef USE_ZVNET_PACKET
		//ret_len = zvnet_channel_read(&g_zvnet_chn_info, (void *)buff, sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM);
		msg.buf = (void *)(buff);	/* destination for the descriptor batch */
		msg.len = sizeof(struct T_zvnet_rpmsg)*ZVNET_XMIT_BUFF_NUM;	/* max bytes to read */
		ret_len = zvnetRead(&msg);	/* read one node from the ring queue (blocking) */
		zv_info("zvnetRead ret=%d", ret_len);
		if(ret_len <= 0) {
			zv_err("rpm read err=%d", ret_len);
			msleep(1000);	/* back off instead of busy-looping on a failing channel */
			continue;
		}
		/* The channel must hand back whole descriptors only. */
		if((ret_len % sizeof(struct T_zvnet_rpmsg)) != 0) {
			panic("err, ret:%d!!!!!!", ret_len);
		}
		num = ret_len / sizeof(struct T_zvnet_rpmsg);
		for(i = 0; i < num; i++){
			skb = skb_build_apbuf(&buff[i]);	/* zero-copy wrap; also sets skb->dev */
			if (unlikely(!skb)) {
				zv_err("skb_build_apbuf fail,len=%d i=%d",ret_len,i);
				continue;
			}
			if(unlikely(!(skb->dev->flags & IFF_UP))) {
				if(printk_ratelimit())
					zv_err("drop!!!%s is down.", skb->dev->name);
				dev_kfree_skb (skb);
				continue;
			}
			skb->protocol = eth_type_trans(skb, skb->dev);
			/* Fast-path hook may consume the skb entirely. */
			if (fast_from_driver && fast_from_driver(skb, skb->dev))
			{
				continue;
			}
			index = zvnet_get_index_by_netdev(skb->dev);
			if(index < 0)
				panic("");
			dev = zvnet_dev[index].dev;
			/* rxq is shared with the tasklet; protect with its own lock. */
			spin_lock_irqsave(&dev->rxq.lock, flags);
			__skb_queue_tail(&dev->rxq, skb);
			spin_unlock_irqrestore(&dev->rxq.lock, flags);
			tasklet_schedule(&dev->bh);
		}
#else
		if(0 != (skb = zvnet_direct_read_skb(&g_zvnet_chn_info))) {
			//skb_debug_test(skb);
			if(unlikely(!(skb->dev->flags & IFF_UP))) {
				zv_err("drop!!!%s is down.", skb->dev->name);
				dev_kfree_skb (skb);
				continue;
			}
			skb->protocol = eth_type_trans(skb, skb->dev);
#if 1
			if (fast_from_driver && fast_from_driver(skb, skb->dev))
			{
				continue;
			}
#endif
			index = zvnet_get_index_by_netdev(skb->dev);
			if(index < 0)
				panic("");
			dev = zvnet_dev[index].dev;
			spin_lock_irqsave(&dev->rxq.lock, flags);
			__skb_queue_tail(&dev->rxq, skb);
			spin_unlock_irqrestore(&dev->rxq.lock, flags);
			tasklet_schedule(&dev->bh);
		}
		else {
			zv_err("zvnet_read_header fail.");
			msleep(1000);
		}
#endif
	}

	zv_err("The receive thread exit!");
	return 0;
}
|
| 962 |
|
| 963 | static int rpmsgCreateChannel_v2xnet (T_RpMsg_CoreID dstCoreID, T_RpMsg_ChID chID, unsigned int size)
|
| 964 | {
|
| 965 | return zvnetCreateChannel (dstCoreID, chID, size);
|
| 966 | }
|
| 967 |
|
| 968 | static int zvnet_createIcpChannel(T_RpMsg_CoreID core_id, T_RpMsg_ChID channel_id, unsigned int channel_size)
|
| 969 | {
|
| 970 | int retval;
|
| 971 |
|
| 972 | retval = rpmsgCreateChannel_v2xnet (core_id, channel_id, channel_size);
|
| 973 | if(retval != RPMSG_SUCCESS && retval != RPMSG_CHANNEL_ALREADY_EXIST)
|
| 974 | goto out;
|
| 975 |
|
| 976 | return retval;
|
| 977 |
|
| 978 | out:
|
| 979 | zv_err("could not create channel.");
|
| 980 | return retval;
|
| 981 | }
|
| 982 | /*
|
| 983 | static int zvnet_channel_create(struct zvnet_device *zvnetdev)
|
| 984 | {
|
| 985 | struct task_struct *th = NULL;
|
| 986 | int retval = 0;
|
| 987 | struct zvnet_channel *chninfo = NULL;
|
| 988 |
|
| 989 | if (IS_ERR(zvnetdev)) {
|
| 990 | return -EINVAL;
|
| 991 | }
|
| 992 | chninfo = &(zvnetdev->chn_info);
|
| 993 | retval = zvnet_createIcpChannel(chninfo->core_id, chninfo->channel_id, chninfo->channel_size);
|
| 994 | if(retval < 0) {
|
| 995 | zv_err("Create IcpChannel fail.");
|
| 996 | return retval;
|
| 997 | }
|
| 998 |
|
| 999 | th = kthread_run(zvnet_receive_thread, (void *)zvnetdev, "zvnet-recv%d", chninfo->channel_id);
|
| 1000 | if (IS_ERR(th)) {
|
| 1001 | zv_err("Unable to start receive thread.");
|
| 1002 | return PTR_ERR(th);
|
| 1003 | }
|
| 1004 | chninfo->rcv_thread = th;
|
| 1005 |
|
| 1006 | return 0;
|
| 1007 | }
|
| 1008 | */
|
| 1009 | static int zvnet_release_thread(void * nouse)
|
| 1010 | {
|
| 1011 | T_RpMsg_Msg msg = { .coreID = CORE_PS0,
|
| 1012 | .chID = 29,
|
| 1013 | .flag = 0};
|
| 1014 | void *buff[ZVNET_FREE_BUFF_NUM];
|
| 1015 | int i,num,retval;
|
| 1016 | struct sk_buff *skb;
|
| 1017 | struct sched_param param = { .sched_priority = 1 };
|
| 1018 | param.sched_priority = 37;
|
| 1019 | sched_setscheduler(current, SCHED_FIFO, ¶m);
|
| 1020 |
|
| 1021 | while(1) {
|
| 1022 | zv_info("g_zvnet_skb_queue.qlen=%d", g_zvnet_skb_queue.qlen);
|
| 1023 | #ifdef USE_ZVNET_PACKET
|
| 1024 | msg.buf = (unsigned char *)(buff); // Êý¾Ý
|
| 1025 | msg.len = 4*ZVNET_FREE_BUFF_NUM;// ¶ÁÈ¡µÄ³¤¶È
|
| 1026 | retval = zvnetRead(&msg); // ¶ÁÈ¡»·ÐζÓÁÐÖÐÒ»¸ö½Úµã£¬
|
| 1027 | zv_info("free read ret=%d", retval);
|
| 1028 | if (retval <= 0) {
|
| 1029 | zv_err("rpm read err=%d", retval);
|
| 1030 | msleep(1000);
|
| 1031 | continue;
|
| 1032 | }
|
| 1033 | if((retval%4) != 0) {
|
| 1034 | panic("err, ret:%d!!!!!!", retval);
|
| 1035 | }
|
| 1036 | num = retval>>2;
|
| 1037 | for(i = 0; i < num; i++){
|
| 1038 | skb = (struct sk_buff *)buff[i];
|
| 1039 | zv_info("free 0x%x", skb);
|
| 1040 | if (skb == NULL || skb->next == NULL || skb->prev == NULL) {
|
| 1041 | panic("rpm read=%d i=%d NULL", retval, i);
|
| 1042 | continue;
|
| 1043 | }
|
| 1044 | skb_unlink(skb, &g_zvnet_skb_queue);
|
| 1045 | if(skb->isToap != 1)
|
| 1046 | panic("");
|
| 1047 | skb->isToap = 0;
|
| 1048 | kfree_skb(skb);
|
| 1049 | }
|
| 1050 | #else
|
| 1051 | void *buff;
|
| 1052 | msg.coreID = CORE_PS0;
|
| 1053 | msg.chID = 29;
|
| 1054 | msg.buf = (unsigned char *)(&buff); // Êý¾Ý
|
| 1055 | msg.len = 4;// ¶ÁÈ¡µÄ³¤¶È
|
| 1056 | //msg.flag |= RPMSG_READ_POLL;
|
| 1057 |
|
| 1058 | retval = zvnetRead(&msg); // ¶ÁÈ¡»·ÐζÓÁÐÖÐÒ»¸ö½Úµã£¬
|
| 1059 | if (retval <= 0) {
|
| 1060 | zv_err("no msg or threand exited");
|
| 1061 | msleep(1000);
|
| 1062 | continue;
|
| 1063 | }
|
| 1064 | if(retval != 4) {
|
| 1065 | panic("err, ret:%d!!!!!!", retval);
|
| 1066 | }
|
| 1067 | zv_info("free 0x%x", buff);
|
| 1068 | skb = (struct sk_buff *)buff;
|
| 1069 | skb_unlink(skb, &g_zvnet_skb_queue);
|
| 1070 | if(skb->isToap != 1)
|
| 1071 | panic("");
|
| 1072 | skb->isToap = 0;
|
| 1073 | kfree_skb(skb);
|
| 1074 | #endif
|
| 1075 | }
|
| 1076 | zv_err("The realse thread exit!");
|
| 1077 | return 0;
|
| 1078 | }
|
| 1079 | #ifdef USE_ZVNET_PACKET
|
| 1080 | static int zvnet_xmit_warp_thread(void * nouse)
|
| 1081 | {
|
| 1082 | while(1) {
|
| 1083 | down_timeout(&g_zvnet_xmit_sem, msecs_to_jiffies(g_wrap_timeout));
|
| 1084 | zvnet_xmit_packet();
|
| 1085 | }
|
| 1086 | zv_err("The xmit warp thread exit!");
|
| 1087 | return 0;
|
| 1088 | }
|
| 1089 |
|
| 1090 | static int zvnet_free_warp_thread(void * nouse)
|
| 1091 | {
|
| 1092 | while(1) {
|
| 1093 | down_timeout(&g_zvnet_free_sem, msecs_to_jiffies(g_wrap_timeout));
|
| 1094 | write_free_apbuf_packet();
|
| 1095 | }
|
| 1096 | zv_err("The free warp thread exit!");
|
| 1097 | return 0;
|
| 1098 | }
|
| 1099 | #endif
|
| 1100 |
|
/*
 * Kernel thread: conntrack statistics / keep-alive sync.
 *
 * Blocks on rpmsg channel 21 towards CORE_PS0.  Each message is a
 * struct T_zvnet_rpmsg_ctstat describing traffic the peer core fast-
 * forwarded for one connection.  When buff.flag is set the thread folds
 * the packet/byte counters into the in/out net devices, their vlan
 * per-cpu stats, and the conntrack entry, and refreshes the conntrack
 * and fast-entry timeouts; when clear, the peer is dropping its
 * reference, so the conntrack timeout is expired and the ct put.
 * Runs forever; never returns in practice.
 */
static int zvnet_update_thread(void * nouse)
{
	T_RpMsg_Msg msg = { .coreID = CORE_PS0,
						.chID = 21,
						.flag = 0};
	int ret_len = 0;
	struct nf_conn *ct;
	fast_entry_t *entry;
	struct net_device *in;
	struct net_device *out;

	while(1) {
		struct T_zvnet_rpmsg_ctstat buff = {0};
		msg.buf = (void *)(&buff);	/* destination for one stats record */
		msg.len = sizeof(struct T_zvnet_rpmsg_ctstat);	/* length to read */
		ret_len = zvnetRead(&msg);	/* read one node from the ring queue (blocking) */

		if(ret_len <= 0) {
			zv_err("rpm read err=%d", ret_len);
			msleep(1000);	/* back off instead of busy-looping on a failing channel */
			continue;
		}
		/* Whole records only; anything else means ring corruption. */
		if(ret_len != sizeof(struct T_zvnet_rpmsg_ctstat)) {
			panic("err, ret:%d!!!!!!", ret_len);
		}
		ct = (struct nf_conn *)buff.cap_nfct;	/* peer echoes back our conntrack pointer */
		WARN_ON(atomic_read(&ct->ct_general.use) == 0);
		if(buff.flag){
			/* Nothing counted in either direction: nothing to do. */
			if(!(buff.pkt[0].pkt || buff.pkt[0].len || buff.pkt[1].pkt || buff.pkt[1].len))
				continue;
			/* buff.in/buff.out are 1-based device indices (cid 1..8). */
			BUG_ON(buff.in <= 0 || buff.out <= 0);
			in = zvnet_dev[buff.in-1].net;
			out = zvnet_dev[buff.out-1].net;
			/* Direction 0 (original): rx on "in", tx on "out". */
			if(buff.pkt[0].pkt && buff.pkt[0].len){
				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[0]->name, ct->outdev[0]->name, buff.in, buff.out);
				in->stats.rx_packets += buff.pkt[0].pkt;
				in->stats.rx_bytes += buff.pkt[0].len;
				out->stats.tx_packets += buff.pkt[0].pkt;
				out->stats.tx_bytes += buff.pkt[0].len;
			}
			/* Direction 1 (reply): rx on "out", tx on "in". */
			if(buff.pkt[1].pkt && buff.pkt[1].len){
				zv_info("nf_update %x %s %s %d %d", buff.cap_nfct, ct->indev[1]->name, ct->outdev[1]->name, buff.out, buff.in);
				out->stats.rx_packets += buff.pkt[1].pkt;
				out->stats.rx_bytes += buff.pkt[1].len;
				in->stats.tx_packets += buff.pkt[1].pkt;
				in->stats.tx_bytes += buff.pkt[1].len;
			}
			spin_lock_bh(&fast_fw_spinlock);
			/* Refresh the connection-tracking timeout (traffic seen on fast path). */
			if (IPPROTO_TCP == nf_ct_protonum(ct))
			{
				ct->timeout = jiffies + tcp_timeouts[ct->proto.tcp.state];
			}else if (IPPROTO_UDP == nf_ct_protonum(ct)){
				/* UDP: longer timeout once the connection is bidirectional. */
				if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)){
					ct->timeout = jiffies + fast_udp_timeout_stream;
				}else{
					ct->timeout = jiffies + fast_udp_timeout;
				}
			}
			entry = (fast_entry_t *)ct->fast_entry;
			if(entry){
				WARN_ON(entry->ct != ct);
				/* Keep the fast-path entry alive as long as the conntrack. */
				mod_timer(&entry->timeout, ct->timeout);
			}
			ct->packet_info[IP_CT_DIR_ORIGINAL].bytes += buff.pkt[IP_CT_DIR_ORIGINAL].len;
			ct->packet_info[IP_CT_DIR_ORIGINAL].packets += buff.pkt[IP_CT_DIR_ORIGINAL].pkt;
			ct->packet_info[IP_CT_DIR_REPLY].bytes += buff.pkt[IP_CT_DIR_REPLY].len;
			ct->packet_info[IP_CT_DIR_REPLY].packets += buff.pkt[IP_CT_DIR_REPLY].pkt;
			/* Mirror the counters into vlan per-cpu stats when indev[0] is a vlan. */
			if(ct->indev[0] && is_vlan_dev(ct->indev[0])){
				struct net_device *tmp = vlan_dev_real_dev(ct->indev[0]);
				struct vlan_pcpu_stats *stats = this_cpu_ptr(vlan_dev_priv(ct->indev[0])->vlan_pcpu_stats);

				if(tmp == in){
					/* Started by AICoder, pid:tbef0151bf4135d1479d0a5d108c870bc756e858 */
					u64_stats_update_begin(&stats->syncp);
					stats->rx_packets += buff.pkt[0].pkt;
					stats->rx_bytes += buff.pkt[0].len;
					stats->tx_packets += buff.pkt[1].pkt;
					stats->tx_bytes += buff.pkt[1].len;
					u64_stats_update_end(&stats->syncp);
					/* Ended by AICoder, pid:tbef0151bf4135d1479d0a5d108c870bc756e858 */
				}else if(tmp == out){
					/* Started by AICoder, pid:y34f7id6bcs049f144f10bb8a05c9703b196635b */
					u64_stats_update_begin(&stats->syncp);
					stats->tx_packets += buff.pkt[0].pkt;
					stats->tx_bytes += buff.pkt[0].len;
					stats->rx_packets += buff.pkt[1].pkt;
					stats->rx_bytes += buff.pkt[1].len;
					u64_stats_update_end(&stats->syncp);
					/* Ended by AICoder, pid:y34f7id6bcs049f144f10bb8a05c9703b196635b */
				}else
					zv_err("nf_update0 %s->%s!=%s-%s", in->name, out->name, tmp->name, ct->indev[0]->name);
			}
			/* Same mirroring for the reply-side vlan device, if any. */
			if(ct->indev[1] && is_vlan_dev(ct->indev[1])){
				struct net_device *tmp = vlan_dev_real_dev(ct->indev[1]);
				struct vlan_pcpu_stats *stats = this_cpu_ptr(vlan_dev_priv(ct->indev[1])->vlan_pcpu_stats);

				if(tmp == in){
					/* Started by AICoder, pid:8bef0t51bfu135d1479d0a5d108c870bc756e858 */
					u64_stats_update_begin(&stats->syncp);
					stats->rx_packets += buff.pkt[0].pkt;
					stats->rx_bytes += buff.pkt[0].len;
					stats->tx_packets += buff.pkt[1].pkt;
					stats->tx_bytes += buff.pkt[1].len;
					u64_stats_update_end(&stats->syncp);
					/* Ended by AICoder, pid:8bef0t51bfu135d1479d0a5d108c870bc756e858 */
				}else if(tmp == out){
					/* Started by AICoder, pid:934f7zd6bcl049f144f10bb8a05c9703b196635b */
					u64_stats_update_begin(&stats->syncp);
					stats->tx_packets += buff.pkt[0].pkt;
					stats->tx_bytes += buff.pkt[0].len;
					stats->rx_packets += buff.pkt[1].pkt;
					stats->rx_bytes += buff.pkt[1].len;
					u64_stats_update_end(&stats->syncp);
					/* Ended by AICoder, pid:934f7zd6bcl049f144f10bb8a05c9703b196635b */
				}else
					zv_err("nf_update1 %s->%s!=%s-%s", in->name, out->name, tmp->name, ct->indev[1]->name);
			}
			spin_unlock_bh(&fast_fw_spinlock);
			zv_info("nf_update %x %d %d %d %d", buff.cap_nfct, buff.pkt[0].pkt, buff.pkt[0].len, buff.pkt[1].pkt, buff.pkt[1].len);
		}else{
			zv_info("nf_put %x", buff.cap_nfct);
			/* Peer released its hold: expire the entry now and drop our reference. */
			WRITE_ONCE(ct->timeout, nfct_time_stamp);
			nf_conntrack_put(buff.cap_nfct);
		}
	}
	zv_err("The update thread exit!");
	return 0;
}
|
| 1231 |
|
| 1232 | /*******************************************************************************
|
| 1233 | * Global function implementations *
|
| 1234 | ******************************************************************************/
|
| 1235 | static int __init zvnet_init(void)
|
| 1236 | {
|
| 1237 | int i;
|
| 1238 | int err = -ENOMEM;
|
| 1239 | struct zvnet *dev = NULL;
|
| 1240 | struct net_device *net = NULL;
|
| 1241 | struct zvnet_device *zvnetdev = NULL;
|
| 1242 |
|
| 1243 | #ifdef USE_ZVNET_PACKET
|
| 1244 | skb_queue_head_init(&g_zvnet_skb_xmit_queue);
|
| 1245 | spin_lock_init(&g_zvnet_free_lock);
|
| 1246 | sema_init(&g_zvnet_free_sem, 0);
|
| 1247 | sema_init(&g_zvnet_xmit_sem, 0);
|
| 1248 | #endif
|
| 1249 | skb_queue_head_init(&g_zvnet_skb_queue);
|
| 1250 | g_zvnet_chn_info.core_id = CORE_PS0;
|
| 1251 | g_zvnet_chn_info.channel_id = ICP_CHN_ZVNET1;
|
| 1252 | g_zvnet_chn_info.channel_size = ICP_CHANNEL_SIZE;
|
| 1253 | for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
|
| 1254 | zvnetdev = &zvnet_dev[i];
|
| 1255 | memset(zvnetdev, 0, sizeof(struct zvnet_device));
|
| 1256 | net = alloc_etherdev(sizeof(struct zvnet));
|
| 1257 | if (!net) {
|
| 1258 | zv_err("could not allocate device.\n");
|
| 1259 | return err;
|
| 1260 | }
|
| 1261 |
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 1262 | //net->needed_headroom += ZVNET_SKB_PAD;//NET_SKB_PAD;
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 1263 | sprintf(net->name, "%s%d", ZVNET_IFNAME_PREFIX, i);
|
| 1264 | dev = v2xnet_dev_init(net, zvnetdev);
|
| 1265 | v2xnet_init_netdev(net);
|
xf.li | 39d9911 | 2024-09-28 04:05:58 -0700 | [diff] [blame^] | 1266 | if(0 == i || i > 8){
|
xf.li | 2f42418 | 2024-08-20 00:47:34 -0700 | [diff] [blame] | 1267 | net->flags = (net->flags & (~IFF_NOARP));
|
| 1268 | }
|
| 1269 | err = register_netdev(net);
|
| 1270 | if (err) {
|
| 1271 | zv_err("register_netdev error:%d :%d\n",err,i);
|
| 1272 | return err;
|
| 1273 | }
|
| 1274 | zvnetdev->dev = dev;
|
| 1275 | zvnetdev->net = net;
|
| 1276 | /*
|
| 1277 | zvnetdev->chn_info.core_id = CAP_ID;
|
| 1278 | zvnetdev->chn_info.channel_id = ICP_CHN_ZVNET1 + i;//zvnet_collect[i];
|
| 1279 | zvnetdev->chn_info.channel_size = ICP_CHANNEL_SIZE;
|
| 1280 | err = zvnet_channel_create(zvnetdev);
|
| 1281 | if(0 != err) {
|
| 1282 | zv_err("zvnet_channel_create error:%d :%d\n",err,i);
|
| 1283 | goto out_unregister_netdev;
|
| 1284 | }
|
| 1285 | */
|
| 1286 | }
|
| 1287 | {
|
| 1288 | struct task_struct *th = NULL;
|
| 1289 | int retval = 0;
|
| 1290 | retval = zvnet_createIcpChannel(CORE_PS0, 21, 64);
|
| 1291 | if(retval < 0) {
|
| 1292 | zv_err("Create IcpChannel channel_21 fail.");
|
| 1293 | return retval;
|
| 1294 | }
|
| 1295 |
|
| 1296 | th = kthread_run(zvnet_update_thread, 0, "zvnet-update%d", 21);
|
| 1297 | if (IS_ERR(th)) {
|
| 1298 | zv_err("Unable to start update thread.");
|
| 1299 | return PTR_ERR(th);
|
| 1300 | }
|
| 1301 | retval = zvnet_createIcpChannel(CORE_PS0, 20, ICP_CHANNEL_SIZE);
|
| 1302 | if(retval < 0) {
|
| 1303 | zv_err("Create IcpChannel channel_20 fail.");
|
| 1304 | return retval;
|
| 1305 | }
|
| 1306 |
|
| 1307 | th = kthread_run(zvnet_receive_thread, 0, "zvnet-recv%d", 20);
|
| 1308 | if (IS_ERR(th)) {
|
| 1309 | zv_err("Unable to start receive thread.");
|
| 1310 | return PTR_ERR(th);
|
| 1311 | }
|
| 1312 | g_zvnet_chn_info.rcv_thread = th;
|
| 1313 |
|
| 1314 | retval = zvnet_createIcpChannel(CORE_PS0, 29, ICP_CHANNEL_SIZE);
|
| 1315 | if(retval < 0) {
|
| 1316 | zv_err("Create IcpChannel channel_29 fail.");
|
| 1317 | return retval;
|
| 1318 | }
|
| 1319 |
|
| 1320 | th = kthread_run(zvnet_release_thread, 0, "zvnet-free%d", 29);
|
| 1321 | if (IS_ERR(th)) {
|
| 1322 | zv_err("Unable to start release thread.");
|
| 1323 | return PTR_ERR(th);
|
| 1324 | }
|
| 1325 | #ifdef USE_ZVNET_PACKET
|
| 1326 | th = kthread_run(zvnet_xmit_warp_thread, 0, "zvnet-xmit-wrap");
|
| 1327 | if (IS_ERR(th)) {
|
| 1328 | zv_err("Unable to start xmit_warp thread.");
|
| 1329 | return PTR_ERR(th);
|
| 1330 | }
|
| 1331 |
|
| 1332 | th = kthread_run(zvnet_free_warp_thread, 0, "zvnet-free-wrap");
|
| 1333 | if (IS_ERR(th)) {
|
| 1334 | zv_err("Unable to start free_warp thread.");
|
| 1335 | return PTR_ERR(th);
|
| 1336 | }
|
| 1337 | #endif
|
| 1338 | vir_addr_ap = ioremap_cache(DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
|
| 1339 | zv_warn("vir_addr_ap vir=0x%x phy=0x%x len=0x%x", vir_addr_ap, DDR_BASE_ADDR_AP, DDR_BASE_LEN_AP);
|
| 1340 | if(vir_addr_ap == NULL)
|
| 1341 | {
|
| 1342 | zv_err("AP mmap failed.\n");
|
| 1343 | return -1;
|
| 1344 | }
|
| 1345 |
|
| 1346 | }
|
| 1347 | zv_dbg("success.\n");
|
| 1348 | return 0;
|
| 1349 |
|
| 1350 | }
|
| 1351 |
|
| 1352 | static void __exit zvnet_exit(void)
|
| 1353 | {
|
| 1354 | int i;
|
| 1355 | struct net_device *net;
|
| 1356 |
|
| 1357 | for (i = 0; i < DDR_ZVNET_DEV_MAX; i++) {
|
| 1358 | net = zvnet_dev[i].net;
|
| 1359 | unregister_netdev(net);
|
| 1360 | free_netdev(net);
|
| 1361 | zvnet_dev[i].net = NULL;
|
| 1362 | }
|
| 1363 | zv_warn("success.\n");
|
| 1364 | }
|
| 1365 |
|
| 1366 | late_initcall(zvnet_init);
|
| 1367 | module_exit(zvnet_exit);
|
| 1368 |
|
| 1369 | MODULE_AUTHOR("ZXIC");
|
| 1370 | MODULE_DESCRIPTION("ZXIC CAP LAN NET DEVICE");
|
| 1371 | MODULE_LICENSE("GPL");
|
| 1372 |
|