blob: 188e61584bbcd810c33073a51986ca0bad6a7fd0 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (c) 2014 Christopher Anderson
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23#include <assert.h>
24#include <lib/console.h>
25#include <debug.h>
26#include <list.h>
27#include <err.h>
28#include <errno.h>
29#include <reg.h>
30#include <endian.h>
31#include <stdio.h>
32#include <string.h>
33#include <malloc.h>
34#include <trace.h>
35#include <bits.h>
36#include <pow2.h>
37#include <sys/types.h>
38#include <lib/cbuf.h>
39#include <kernel/timer.h>
40#include <kernel/thread.h>
41#include <kernel/vm.h>
42#include <kernel/spinlock.h>
43#include <kernel/debug.h>
44#include <platform/interrupts.h>
45#include <platform/debug.h>
46#include <platform/gem.h>
47#include <platform.h>
48#include <kernel/event.h>
49#include <kernel/semaphore.h>
50
51#include <lib/pktbuf.h>
52#include <lib/pool.h>
53
54#define LOCAL_TRACE 0
55
56/* Allow targets to override these values */
57#ifndef GEM_RX_DESC_CNT
58#define GEM_RX_DESC_CNT 32
59#endif
60
61#ifndef GEM_TX_DESC_CNT
62#define GEM_TX_DESC_CNT 32
63#endif
64
65#ifndef GEM_RX_BUF_SIZE
66#define GEM_RX_BUF_SIZE 1536
67#endif
68
69#ifndef GEM_TX_BUF_SIZE
70#define GEM_TX_BUF_SIZE 1536
71#endif
72
73pool_t rx_buf_pool;
74static spin_lock_t lock = SPIN_LOCK_INITIAL_VALUE;
75
/* One GEM DMA descriptor as consumed by the controller: a buffer address
 * word and a control/status word. Bit layouts differ between the RX and TX
 * rings (RX_DESC_* live in addr/ctrl, TX_DESC_* in ctrl). */
struct gem_desc {
    uint32_t addr;
    uint32_t ctrl;
};
80
81/* Quick overview:
82 * RX:
83 * rx_tbl contains rx descriptors. A pktbuf is allocated for each of these and a descriptor
84 * entry in the table points to a buffer in the pktbuf. rx_tbl[X]'s pktbuf is stored in rx_pbufs[X]
85 *
86 * TX:
87 * The current position to write new tx descriptors to is maintained by gem.tx_head. As frames are
88 * queued in tx_tbl their pktbufs are stored in the list queued_pbufs. As frame transmission is
89 * completed these pktbufs are released back to the pool by the interrupt handler for TX_COMPLETE
90 */
/* RX and TX descriptor rings, kept together in one physically contiguous
 * allocation (mapped uncached-device in gem_init) so the controller and CPU
 * always agree on their contents. */
struct gem_descs {
    struct gem_desc rx_tbl[GEM_RX_DESC_CNT];
    struct gem_desc tx_tbl[GEM_TX_DESC_CNT];
};
95
/* Driver state for the single active GEM instance (see global `gem`). */
struct gem_state {
    volatile struct gem_regs *regs;      /* memory-mapped controller registers */

    struct gem_descs *descs;             /* kernel virtual address of the rings */
    paddr_t descs_phys;                  /* physical address of the same rings */

    unsigned int tx_head;                /* next tx descriptor slot to fill */
    unsigned int tx_tail;                /* oldest in-flight tx descriptor slot */
    unsigned int tx_count;               /* number of tx descriptors in flight */
    struct list_node tx_queue;           /* pktbufs waiting for a descriptor slot */
    struct list_node queued_pbufs;       /* pktbufs whose descriptors are in flight */

    gem_cb_t rx_callback;                /* client callback, run per received frame */
    event_t rx_pending;                  /* signaled by the ISR when rx frames are ready */
    event_t tx_complete;                 /* initialized in gem_init; no waiter in this file -- TODO confirm it is still needed */
    bool debug_rx;                       /* when true, hexdump each received frame */
    pktbuf_t *rx_pbufs[GEM_RX_DESC_CNT]; /* pktbuf backing each rx descriptor */
};
114
115struct gem_state gem;
116
117static void debug_rx_handler(pktbuf_t *p)
118{
119 static uint32_t pkt = 0;
120
121 printf("[%10u] packet %u, %zu bytes:\n", (uint32_t)current_time(), ++pkt, p->dlen);
122 hexdump8(p->data, p->dlen);
123 putchar('\n');
124}
125
/* Release pktbufs for frames the controller has finished transmitting.
 *
 * Walks forward from tx_tail while the hardware has handed descriptors back
 * (TX_DESC_USED set), returning each frame's pktbufs to their pool and
 * shrinking the in-flight count. Caller must hold `lock`.
 *
 * Returns the accumulated pktbuf_free() result (used by the ISR to decide
 * whether a reschedule is warranted).
 */
static int free_completed_pbuf_frames(void) {
    int ret = 0;

    /* write tx_status back to itself to clear the sticky status bits */
    gem.regs->tx_status = gem.regs->tx_status;

    while (gem.tx_count > 0 &&
            (gem.descs->tx_tbl[gem.tx_tail].ctrl & TX_DESC_USED)) {

        /* pop queued pktbufs until this frame's EOF buffer is reached;
         * queue_pkts_in_tx_tbl() asserts frames are single-buffer, so this
         * inner loop normally runs exactly once */
        bool eof;
        do {
            pktbuf_t *p = list_remove_head_type(&gem.queued_pbufs, pktbuf_t, list);
            DEBUG_ASSERT(p);
            eof = p->flags & PKTBUF_FLAG_EOF;
            ret += pktbuf_free(p, false);
        } while (!eof);

        /* one descriptor consumed per frame */
        gem.tx_tail = (gem.tx_tail + 1) % GEM_TX_DESC_CNT;
        gem.tx_count--;
    }

    return ret;
}
148
/* Move packets from the software tx_queue into the hardware TX descriptor
 * ring and kick the controller. Caller must hold `lock` (called from
 * gem_send_raw_pkt and the ISR). Packets that don't fit stay queued for the
 * next call. */
void queue_pkts_in_tx_tbl(void) {
    pktbuf_t *p;
    unsigned int cur_pos;

    if (list_is_empty(&gem.tx_queue)) {
        return;
    }

    // XXX handle multi part buffers

    /* Queue packets in the descriptor table until we're either out of space in the table
     * or out of packets in our tx queue. Any packets left will remain in the list and be
     * processed the next time available */
    while (gem.tx_count < GEM_TX_DESC_CNT &&
            ((p = list_remove_head_type(&gem.tx_queue, pktbuf_t, list)) != NULL)) {
        cur_pos = gem.tx_head;

        uint32_t addr = pktbuf_data_phys(p);
        uint32_t ctrl = gem.descs->tx_tbl[cur_pos].ctrl & TX_DESC_WRAP; /* protect the wrap bit */
        ctrl |= TX_BUF_LEN(p->dlen);

        DEBUG_ASSERT(p->flags & PKTBUF_FLAG_EOF); // a multi part buffer would have caused a race condition w/hardware
        if (p->flags & PKTBUF_FLAG_EOF) {
            ctrl |= TX_LAST_BUF;
        }

        /* fill in the descriptor, control word last (in case hardware is racing us) */
        gem.descs->tx_tbl[cur_pos].addr = addr;
        gem.descs->tx_tbl[cur_pos].ctrl = ctrl;

        /* advance the producer position and remember the pktbuf so the
         * completion path can free it */
        gem.tx_head = (gem.tx_head + 1) % GEM_TX_DESC_CNT;
        gem.tx_count++;
        list_add_tail(&gem.queued_pbufs, &p->list);
    }

    /* ensure descriptor writes are visible before starting the DMA engine */
    DMB;
    gem.regs->net_ctrl |= NET_CTRL_START_TX;
}
187
188int gem_send_raw_pkt(struct pktbuf *p)
189{
190 status_t ret = NO_ERROR;
191
192 if (!p || !p->dlen) {
193 ret = -1;
194 goto err;
195 }
196
197 /* make sure the output buffer is fully written to memory before
198 * placing on the outgoing list. */
199
200 // XXX handle multi part buffers
201 arch_clean_cache_range((vaddr_t)p->data, p->dlen);
202
203 spin_lock_saved_state_t irqstate;
204 spin_lock_irqsave(&lock, irqstate);
205 list_add_tail(&gem.tx_queue, &p->list);
206 queue_pkts_in_tx_tbl();
207 spin_unlock_irqrestore(&lock, irqstate);
208
209err:
210 return ret;
211}
212
213
/* GEM interrupt service routine.
 *
 * Loops re-reading intr_status until the controller reports no pending
 * causes, handling RX completion, RX descriptor exhaustion, TX AHB errors,
 * TX completion, and TX used-bit reads. Returns INT_RESCHEDULE when work
 * was handed to a thread (rx event signaled or tx buffers freed).
 */
enum handler_return gem_int_handler(void *arg) {
    uint32_t intr_status;
    bool resched = false;

    intr_status = gem.regs->intr_status;

    spin_lock(&lock);

    while (intr_status) {
        // clear any pending status (write-one-to-clear semantics -- the
        // handled bits are acked by writing the value back)
        gem.regs->intr_status = intr_status;

        // Received an RX complete; wake the rx thread to drain descriptors
        if (intr_status & INTR_RX_COMPLETE) {
            event_signal(&gem.rx_pending, false);

            gem.regs->rx_status |= INTR_RX_COMPLETE;

            resched = true;
        }

        if (intr_status & INTR_RX_USED_READ) {
            /* controller ran out of rx descriptors: hand every descriptor
             * back to hardware (dropping whatever was pending) and bounce
             * the receiver to restart DMA */
            for (int i = 0; i < GEM_RX_DESC_CNT; i++) {
                gem.descs->rx_tbl[i].addr &= ~RX_DESC_USED;
            }

            gem.regs->rx_status &= ~RX_STATUS_BUFFER_NOT_AVAIL;
            gem.regs->net_ctrl &= ~NET_CTRL_RX_EN;
            gem.regs->net_ctrl |= NET_CTRL_RX_EN;
            printf("GEM overflow, dumping pending packets\n");
        }

        if (intr_status & INTR_TX_CORRUPT) {
            /* AHB error during a tx: reclaim whatever frames did complete */
            printf("tx ahb error!\n");
            if (free_completed_pbuf_frames() > 0) {
                resched = true;
            }
        }

        /* A frame has been completed so we can clean up ownership of its buffers */
        if (intr_status & INTR_TX_COMPLETE) {
            if (free_completed_pbuf_frames() > 0) {
                resched = true;
            }
        }

        /* The controller has processed packets until it hit a buffer owned by the driver */
        if (intr_status & INTR_TX_USED_READ) {
            queue_pkts_in_tx_tbl();
            gem.regs->tx_status |= TX_STATUS_USED_READ;
        }

        /* see if we have any more */
        intr_status = gem.regs->intr_status;
    }

    spin_unlock(&lock);

    return (resched) ? INT_RESCHEDULE : INT_NO_RESCHEDULE;
}
275
276static bool wait_for_phy_idle(void)
277{
278 int iters = 1000;
279 while (iters && !(gem.regs->net_status & NET_STATUS_PHY_MGMT_IDLE)) {
280 iters--;
281 }
282
283 if (iters == 0) {
284 return false;
285 }
286
287 return true;
288}
289
290static bool gem_phy_init(void) {
291 return wait_for_phy_idle();
292}
293
294static status_t gem_cfg_buffer_descs(void)
295{
296 void *rx_buf_vaddr;
297 status_t ret;
298
299
300 if ((ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "gem_rx_bufs",
301 GEM_RX_DESC_CNT * GEM_RX_BUF_SIZE, (void **) &rx_buf_vaddr, 0, 0,
302 ARCH_MMU_FLAG_CACHED)) < 0) {
303 return ret;
304 }
305
306 /* Take pktbufs from the allocated target pool and assign them to the gem RX
307 * descriptor table */
308 pool_init(&rx_buf_pool, GEM_RX_BUF_SIZE, CACHE_LINE, GEM_RX_DESC_CNT, rx_buf_vaddr);
309 for (unsigned int n = 0; n < GEM_RX_DESC_CNT; n++) {
310 void *b = pool_alloc(&rx_buf_pool);
311 pktbuf_t *p = pktbuf_alloc_empty();
312 if (!p || !b) {
313 return -1;
314 }
315
316 pktbuf_add_buffer(p, b, GEM_RX_BUF_SIZE, 0, PKTBUF_FLAG_CACHED, NULL, NULL);
317 gem.rx_pbufs[n] = p;
318 gem.descs->rx_tbl[n].addr = (uintptr_t) p->phys_base;
319 gem.descs->rx_tbl[n].ctrl = 0;
320 }
321
322 /* Claim ownership of TX descriptors for the driver */
323 for (unsigned i = 0; i < GEM_TX_DESC_CNT; i++) {
324 gem.descs->tx_tbl[i].addr = 0;
325 gem.descs->tx_tbl[i].ctrl = TX_DESC_USED;
326 }
327
328 /* Both set of descriptors need wrap bits set at the end of their tables*/
329 gem.descs->rx_tbl[GEM_RX_DESC_CNT-1].addr |= RX_DESC_WRAP;
330 gem.descs->tx_tbl[GEM_TX_DESC_CNT-1].ctrl |= TX_DESC_WRAP;
331
332 /* Point the controller at the offset into state's physical location for RX descs */
333 gem.regs->rx_qbar = ((uintptr_t)&gem.descs->rx_tbl[0] - (uintptr_t)gem.descs) + gem.descs_phys;
334 gem.regs->tx_qbar = ((uintptr_t)&gem.descs->tx_tbl[0] - (uintptr_t)gem.descs) + gem.descs_phys;
335
336 return NO_ERROR;
337}
338
339static void gem_cfg_ints(void)
340{
341 uint32_t gem_base = (uintptr_t)gem.regs;
342
343 if (gem_base == GEM0_BASE) {
344 register_int_handler(ETH0_INT, gem_int_handler, NULL);
345 unmask_interrupt(ETH0_INT);
346 } else if (gem_base == GEM1_BASE) {
347 register_int_handler(ETH1_INT, gem_int_handler, NULL);
348 unmask_interrupt(ETH1_INT);
349 } else {
350 printf("Illegal gem periph base address 0x%08X!\n", gem_base);
351 return;
352 }
353
354 /* Enable all interrupts */
355 gem.regs->intr_en = INTR_RX_COMPLETE | INTR_TX_COMPLETE | INTR_HRESP_NOT_OK | INTR_MGMT_SENT |
356 INTR_RX_USED_READ | INTR_TX_CORRUPT | INTR_TX_USED_READ | INTR_RX_OVERRUN;
357}
358
/* RX worker thread.
 *
 * Sleeps on gem.rx_pending (signaled by the ISR) and then drains every
 * descriptor the hardware has handed back, invoking the optional debug
 * dump and the client rx callback for each frame before returning the
 * descriptor to the controller.
 */
int gem_rx_thread(void *arg)
{
    pktbuf_t *p;
    int bp = 0; /* our position in the rx ring; persists across wakeups */

    while (1) {
        event_wait(&gem.rx_pending);

        for (;;) {
            if (gem.descs->rx_tbl[bp].addr & RX_DESC_USED) {
                uint32_t ctrl = gem.descs->rx_tbl[bp].ctrl;

                p = gem.rx_pbufs[bp];
                p->dlen = RX_BUF_LEN(ctrl);
                /* skip the 2-byte pad configured via NET_CFG_RX_BUF_OFFSET(2)
                 * in gem_init */
                p->data = p->buffer + 2;

                /* copy the checksum offloading bits */
                p->flags = 0;
                p->flags |= (BITS_SHIFT(ctrl, 23, 22) != 0) ? PKTBUF_FLAG_CKSUM_IP_GOOD : 0;
                p->flags |= (BITS_SHIFT(ctrl, 23, 22) == 1) ? PKTBUF_FLAG_CKSUM_UDP_GOOD : 0;
                p->flags |= (BITS_SHIFT(ctrl, 23, 22) == 2) ? PKTBUF_FLAG_CKSUM_TCP_GOOD : 0;

                /* invalidate any stale cache lines on the receive buffer to ensure
                 * the cpu has a fresh copy of incoming data. */
                arch_invalidate_cache_range((vaddr_t)p->data, p->dlen);

                if (unlikely(gem.debug_rx)) {
                    debug_rx_handler(p);
                }

                if (likely(gem.rx_callback)) {
                    gem.rx_callback(p);
                }

                /* make sure all dirty data is flushed out of the buffer before
                 * putting into the receive queue */
                arch_clean_invalidate_cache_range((vaddr_t)p->buffer, PKTBUF_SIZE);

                /* hand the descriptor back to the controller */
                gem.descs->rx_tbl[bp].addr &= ~RX_DESC_USED;
                gem.descs->rx_tbl[bp].ctrl = 0;
                bp = (bp + 1) % GEM_RX_DESC_CNT;
            } else {
                break;
            }
        }
    }

    return 0;
}
408
409
410int gem_stat_thread(void *arg) {
411 volatile bool *run = ((bool *)arg);
412 static uint32_t frames_rx = 0, frames_tx = 0;
413
414 while (*run) {
415 frames_tx += gem.regs->frames_tx;
416 frames_rx += gem.regs->frames_rx;
417 printf("GEM tx_head %u, tx_tail %u, tx_count %u, tx_frames %u, rx_frames %u\n",
418 gem.tx_head, gem.tx_tail, gem.tx_count, frames_tx, frames_rx);
419 thread_sleep(1000);
420 }
421
422 return 0;
423}
424
/* Put the GEM peripheral into a clean, quiescent state: pulse its SLCR
 * reset, clear status/stat counters, disable interrupts, and zero the
 * descriptor queue base registers. */
void gem_deinit(uintptr_t base)
{
    /* reset the gem peripheral */
    uint32_t rst_mask;
    /* NOTE(review): per-instance SLCR reset bits (rx/tx/ref) -- the exact
     * bit meanings come from the Zynq SLCR GEM_RST_CTRL layout; confirm
     * against the TRM */
    if (base == GEM0_BASE) {
        rst_mask = (1<<6) | (1<<4) | (1<<0);
    } else {
        rst_mask = (1<<7) | (1<<5) | (1<<1);
    }
    SLCR->GEM_RST_CTRL |= rst_mask;
    spin(1);
    SLCR->GEM_RST_CTRL &= ~rst_mask;


    /* Clear Network control / status registers */
    gem.regs->net_ctrl |= NET_CTRL_STATCLR;
    gem.regs->rx_status = 0x0F;
    gem.regs->tx_status = 0xFF;
    /* Disable interrupts */
    gem.regs->intr_dis = 0x7FFFEFF;

    /* Empty out the buffer queues */
    gem.regs->rx_qbar = 0;
    gem.regs->tx_qbar = 0;
}
450
451status_t gem_init(uintptr_t gem_base)
452{
453 status_t ret;
454 uint32_t reg_val;
455 thread_t *rx_thread;
456 void *descs_vaddr;
457 paddr_t descs_paddr;
458
459 DEBUG_ASSERT(gem_base == GEM0_BASE || gem_base == GEM1_BASE);
460
461 /* Data structure init */
462 event_init(&gem.tx_complete, false, EVENT_FLAG_AUTOUNSIGNAL);
463 event_init(&gem.rx_pending, false, EVENT_FLAG_AUTOUNSIGNAL);
464 list_initialize(&gem.queued_pbufs);
465 list_initialize(&gem.tx_queue);
466
467 /* allocate a block of uncached contiguous memory for the peripheral descriptors */
468 if ((ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "gem_desc",
469 sizeof(*gem.descs), &descs_vaddr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE)) < 0) {
470 return ret;
471 }
472 descs_paddr = kvaddr_to_paddr((void *)descs_vaddr);
473
474 /* tx/rx descriptor tables and memory mapped registers */
475 gem.descs = (void *)descs_vaddr;
476 gem.descs_phys = descs_paddr;
477 gem.regs = (struct gem_regs *)gem_base;
478
479 /* rx background thread */
480 rx_thread = thread_create("gem_rx", gem_rx_thread, NULL, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
481 thread_resume(rx_thread);
482
483 /* Bring whatever existing configuration is up down so we can do it cleanly */
484 gem_deinit(gem_base);
485 gem_cfg_buffer_descs();
486
487 /* Self explanatory configuration for the gige */
488 reg_val = NET_CFG_FULL_DUPLEX;
489 reg_val |= NET_CFG_GIGE_EN;
490 reg_val |= NET_CFG_SPEED_100;
491 reg_val |= NET_CFG_RX_CHKSUM_OFFLD_EN;
492 reg_val |= NET_CFG_FCS_REMOVE;
493 reg_val |= NET_CFG_MDC_CLK_DIV(0x7);
494 reg_val |= NET_CFG_RX_BUF_OFFSET(2);
495 gem.regs->net_cfg = reg_val;
496
497 /* Set DMA to 1600 byte rx buffer, 8KB addr space for rx, 4KB addr space for tx,
498 * hw checksumming, little endian, and use INCR16 ahb bursts
499 */
500 reg_val = DMA_CFG_AHB_MEM_RX_BUF_SIZE(0x19);
501 reg_val |= DMA_CFG_RX_PKTBUF_MEMSZ_SEL(0x3);
502 reg_val |= DMA_CFG_TX_PKTBUF_MEMSZ_SEL;
503 reg_val |= DMA_CFG_CSUM_GEN_OFFLOAD_EN;
504 reg_val |= DMA_CFG_AHB_FIXED_BURST_LEN(0x10);
505 gem.regs->dma_cfg = reg_val;
506
507 /* Enable VREF from GPIOB */
508 SLCR_REG(GPIOB_CTRL) = 0x1;
509
510 ret = gem_phy_init();
511 if (!ret) {
512 printf("Phy not idle, aborting!\n");
513 return ret;
514 }
515
516 gem_cfg_ints();
517
518 reg_val = NET_CTRL_MD_EN;
519 reg_val |= NET_CTRL_RX_EN;
520 reg_val |= NET_CTRL_TX_EN;
521 gem.regs->net_ctrl = reg_val;
522
523 return NO_ERROR;
524}
525
/* Stop the controller: mask interrupts and disable rx/tx. */
void gem_disable(void)
{
    /* disable all the interrupts */
    gem.regs->intr_en = 0;
    /* NOTE(review): only ETH0's interrupt is masked here, even though the
     * driver may have been initialized on GEM1 (see gem_cfg_ints) --
     * confirm whether ETH1_INT also needs masking in that case */
    mask_interrupt(ETH0_INT);

    /* stop tx and rx */
    gem.regs->net_ctrl = 0;
}
535
536void gem_set_callback(gem_cb_t rx)
537{
538 gem.rx_callback = rx;
539}
540
541void gem_set_macaddr(uint8_t mac[6]) {
542 uint32_t en = gem.regs->net_ctrl &= NET_CTRL_RX_EN | NET_CTRL_TX_EN;
543
544 if (en) {
545 gem.regs->net_ctrl &= ~(en);
546 }
547
548 /* _top register must be written after _bot register */
549 gem.regs->spec_addr1_bot = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0];
550 gem.regs->spec_addr1_top = (mac[5] << 8) | mac[4];
551
552 if (en) {
553 gem.regs->net_ctrl |= en;
554 }
555}
556
557
558/* Debug console commands */
559static int cmd_gem(int argc, const cmd_args *argv)
560{
561 static uint32_t frames_rx = 0;
562 static uint32_t frames_tx = 0;
563 static bool run_stats = false;
564 thread_t *stat_thread;
565
566 if (argc == 1) {
567 printf("gem raw <iter> <length>: Send <iter> raw mac packet for testing\n");
568 printf("gem rx_debug: toggle RX debug output\n");
569 printf("gem stats toggle periodic output of driver stats\n");
570 printf("gem status: print driver status\n");
571 } else if (strncmp(argv[1].str, "rx_debug", sizeof("rx_debug")) == 0) {
572 pktbuf_t *p;
573 int iter;
574 if (argc < 4) {
575 return 0;
576 }
577
578 if ((p = pktbuf_alloc()) == NULL) {
579 printf("out of buffers\n");
580 }
581
582 iter = argv[2].u;
583 p->dlen = argv[3].u;
584 while (iter--) {
585 memset(p->data, iter, 12);
586 gem_send_raw_pkt(p);
587 }
588 } else if (strncmp(argv[1].str, "status", sizeof("status")) == 0) {
589 uint32_t mac_top = gem.regs->spec_addr1_top;
590 uint32_t mac_bot = gem.regs->spec_addr1_bot;
591 printf("mac addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
592 mac_top >> 8, mac_top & 0xFF, mac_bot >> 24, (mac_bot >> 16) & 0xFF,
593 (mac_bot >> 8) & 0xFF, mac_bot & 0xFF);
594 uint32_t rx_used = 0, tx_used = 0;
595 for (int i = 0; i < GEM_RX_DESC_CNT; i++) {
596 rx_used += !!(gem.descs->rx_tbl[i].addr & RX_DESC_USED);
597 }
598
599 for (int i = 0; i < GEM_TX_DESC_CNT; i++) {
600 tx_used += !!(gem.descs->tx_tbl[i].ctrl & TX_DESC_USED);
601 }
602
603 frames_tx += gem.regs->frames_tx;
604 frames_rx += gem.regs->frames_rx;
605 printf("rx usage: %u/%u, tx usage %u/%u\n",
606 rx_used, GEM_RX_DESC_CNT, tx_used, GEM_TX_DESC_CNT);
607 printf("frames rx: %u, frames tx: %u\n",
608 frames_rx, frames_tx);
609 printf("tx:\n");
610 for (size_t i = 0; i < GEM_TX_DESC_CNT; i++) {
611 uint32_t ctrl = gem.descs->tx_tbl[i].ctrl;
612 uint32_t addr = gem.descs->tx_tbl[i].addr;
613
614 printf("%3zu 0x%08X 0x%08X: len %u, %s%s%s %s%s\n",
615 i, addr, ctrl, TX_BUF_LEN(ctrl),
616 (ctrl & TX_DESC_USED) ? "driver " : "controller ",
617 (ctrl & TX_DESC_WRAP) ? "wrap " : "",
618 (ctrl & TX_LAST_BUF) ? "eof " : "",
619 (i == gem.tx_head) ? "<-- HEAD " : "",
620 (i == gem.tx_tail) ? "<-- TAIL " : "");
621 }
622
623 } else if (strncmp(argv[1].str, "stats", sizeof("stats")) == 0) {
624 run_stats = !run_stats;
625 if (run_stats) {
626 stat_thread = thread_create("gem_stat",
627 gem_stat_thread, &run_stats, LOW_PRIORITY, DEFAULT_STACK_SIZE);
628 thread_resume(stat_thread);
629 }
630 } else if (argv[1].str[0] == 'd') {
631 gem.debug_rx = !gem.debug_rx;
632 }
633
634 return 0;
635}
636
/* Register the "gem" command with the debug console. */
STATIC_COMMAND_START
STATIC_COMMAND("gem", "ZYNQ GEM commands", &cmd_gem)
STATIC_COMMAND_END(gem);