/*
 * asr netdevice skb ring buffer driver
 *
 * Copyright (C) 2020 ASR Micro Limited
 *
 */

#include <linux/atomic.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/skbrb.h>

/* minimal memory required for skbrb */
#define SKBRB_MIN_MEM_REQUIRE (64 * 1024 * 1024)
#define SKBRB_MIN_FREE_SLOTS 10

static struct skbrb skb_rb[SKBRB_TYPE_MAX];

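/*
 * Destructor hook handed to alloc_skb_p(): runs when the skb releases
 * its data, returning the backing slot to the ring and bumping the
 * free count.
 */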
static inline void skbrb_slot_free(void *data, void *ptr __maybe_unused,
				   size_t len __maybe_unused)
{
	struct skbrb_slot *rx_slot = data;
	struct skbrb *rb = &skb_rb[rx_slot->type];

	rx_slot->inuse = false;
	atomic_inc(&rb->free_cnt);
}

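/*
 * Grab the next free slot from the per-type ring and wrap it in an skb.
 * Falls back to dev_alloc_skb() if the ring was never set up; returns
 * NULL when the ring is nearly exhausted so callers can fall back to
 * the regular allocator themselves.
 */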
static struct sk_buff *__skbrb_alloc_skb(int type, unsigned int length)
{
	struct sk_buff *skb;
	struct skbrb *rb = &skb_rb[type];
	int curr_slot;
	int i = 0;

	/* Ring not initialized: use the regular allocator */
	if (unlikely(!rb->addr))
		return dev_alloc_skb(length);

	spin_lock(&rb->lock);
	curr_slot = rb->curr_slot;
	do {
		if (!rb->rx_slots[curr_slot].inuse)
			break;

		/* Ring nearly exhausted: let the caller fall back */
		if (atomic_read(&rb->free_cnt) < SKBRB_MIN_FREE_SLOTS) {
			spin_unlock(&rb->lock);
			return NULL;
		}

		curr_slot++;
		if (curr_slot == rb->slot_cnt)
			curr_slot = 0;
		i++;
		BUG_ON(i > rb->slot_cnt);
	} while (1);
	rb->rx_slots[curr_slot].inuse = true;
	/* Resume the next search right after the slot just taken */
	rb->curr_slot = curr_slot + 1;
	if (rb->curr_slot == rb->slot_cnt)
		rb->curr_slot = 0;

	spin_unlock(&rb->lock);
	atomic_dec(&rb->free_cnt);

	skb = alloc_skb_p(rb->rx_slots[curr_slot].offset,
			  rb->slot_size, skbrb_slot_free,
			  &rb->rx_slots[curr_slot], GFP_ATOMIC);
	if (skb) {
		/* bypass the skb copy operation in fastpath & pipe */
		GET_SKBRB_CB(skb)->flag = SKB_FROM_RB_BIT;
	} else {
		/* Put back this slot and restore the count taken above */
		rb->rx_slots[curr_slot].inuse = false;
		atomic_inc(&rb->free_cnt);
	}

	return skb;
}

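/*
 * Ring allocation with an automatic dev_alloc_skb() fallback when no
 * slot can be taken.
 */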
static struct sk_buff *skbrb_alloc_skb(int type, unsigned int length)
{
	struct sk_buff *skb;

	skb = __skbrb_alloc_skb(type, length);
	if (!skb)
		skb = dev_alloc_skb(length);

	return skb;
}

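/*
 * Carve out one contiguous buffer of slot_size * slot_cnt bytes plus
 * the slot bookkeeping array for the given ring type. Calling it again
 * for an already initialized ring is a no-op.
 */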
static int skbrb_init(int type, int slot_size, int slot_cnt)
{
	int i;
	struct skbrb *rb;

/* Optional minimum-memory sanity check, currently disabled */
#if 0
	if (totalram_pages() < (SKBRB_MIN_MEM_REQUIRE / PAGE_SIZE)) {
		pr_warn("minimal memory required for skbrb is %dMB.\n",
			SKBRB_MIN_MEM_REQUIRE / 1024 / 1024);
		return -ENOMEM;
	}
#endif
	rb = &skb_rb[type];
	if (rb->addr) {
		pr_info("skb rx ring buffer exists already.\n");
		return 0;
	}

	spin_lock_init(&rb->lock);
	rb->slot_size = slot_size;
	rb->slot_cnt = slot_cnt;
	rb->addr = kmalloc_array(slot_cnt, slot_size, GFP_KERNEL);
	if (!rb->addr) {
		pr_err("failed to allocate rx ring buffer memory\n");
		return -ENOMEM;
	}

	rb->rx_slots = kcalloc(slot_cnt, sizeof(*rb->rx_slots), GFP_KERNEL);
	if (!rb->rx_slots) {
		pr_err("failed to allocate rx slots\n");
		kfree(rb->addr);
		rb->addr = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < slot_cnt; i++) {
		rb->rx_slots[i].index = i;
		rb->rx_slots[i].inuse = false;
		rb->rx_slots[i].offset = rb->addr + slot_size * i;
		rb->rx_slots[i].type = type;
	}

	atomic_set(&rb->free_cnt, slot_cnt);
	rb->curr_slot = 0;
	pr_info("skbrb_init(%p): type: %d, slot size: %d, slot cnt: %d. Done.\n",
		rb->addr, type, slot_size, slot_cnt);
	return 0;
}

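/*
 * Free the ring memory and clear all bookkeeping. Callers must ensure
 * no in-flight skb still points into the ring.
 */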
static void skbrb_release(int type)
{
	struct skbrb *rb = &skb_rb[type];

	kfree(rb->rx_slots);
	kfree(rb->addr);
	memset(rb, 0, sizeof(struct skbrb));
}

/* No dev_alloc_skb() fallback here; may return NULL under pressure */
struct sk_buff *emac_skbrb_alloc_skb(unsigned int length)
{
	return __skbrb_alloc_skb(SKBRB_TYPE_EMAC, length);
}

/* Falls back to dev_alloc_skb() when the ring is exhausted */
struct sk_buff *wifi_skbrb_alloc_skb(unsigned int length)
{
	return skbrb_alloc_skb(SKBRB_TYPE_WIFI, length);
}

int emac_skbrb_init(int slot_size, int slot_cnt)
{
	return skbrb_init(SKBRB_TYPE_EMAC, slot_size, slot_cnt);
}

void emac_skbrb_release(void)
{
	skbrb_release(SKBRB_TYPE_EMAC);
}

int wifi_skbrb_init(int slot_size, int slot_cnt)
{
	return skbrb_init(SKBRB_TYPE_WIFI, slot_size, slot_cnt);
}

void wifi_skbrb_release(void)
{
	skbrb_release(SKBRB_TYPE_WIFI);
}
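
/*
 * Usage sketch (illustrative only, not part of this driver): a NIC RX
 * path would typically pair these calls as below. EMAC_RX_BUF_SIZE and
 * EMAC_RX_RING_SLOTS are hypothetical names.
 *
 *	#define EMAC_RX_BUF_SIZE	2048
 *	#define EMAC_RX_RING_SLOTS	512
 *
 *	// probe: carve out the ring once; on failure the driver can
 *	// still run on dev_alloc_skb() alone
 *	emac_skbrb_init(EMAC_RX_BUF_SIZE, EMAC_RX_RING_SLOTS);
 *
 *	// RX refill: prefer a ring slot, fall back when it is crowded
 *	struct sk_buff *skb = emac_skbrb_alloc_skb(EMAC_RX_BUF_SIZE);
 *	if (!skb)
 *		skb = dev_alloc_skb(EMAC_RX_BUF_SIZE);
 *
 *	// remove: only after all RX skbs have been consumed
 *	emac_skbrb_release();
 */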