// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

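/* MMIO helpers: read, write and read-modify-write accessors for PPE registers */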
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

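/* poll until the hardware clears the PPE busy flag, for at most one second */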
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	unsigned long timeout = jiffies + HZ;

	while (time_is_after_jiffies(timeout)) {
		if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY))
			return 0;

		usleep_range(10, 20);
	}

	dev_err(ppe->dev, "PPE table busy\n");

	return -ETIMEDOUT;
}

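/* pulse the clear bit to flush the PPE entry cache */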
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

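/* compute the FOE table hash index for an entry, mirroring the hash that the
 * hardware uses to look up flows, so software places entries in the bucket
 * the PPE will actually search
 */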
static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
	case MTK_PPE_PKT_TYPE_BRIDGE:
		hv1 = e->bridge.src_mac_lo;
		hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
		hv2 = e->bridge.src_mac_hi >> 16;
		hv2 ^= e->bridge.dest_mac_lo;
		hv3 = e->bridge.dest_mac_hi;
		break;
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= 1;
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

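/* IPv6 and tunnel entry layouts keep the L2 info and the ib2 word at a
 * different offset than the IPv4 layout; pick the right one from the
 * packet type
 */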
static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

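/* initialize a FOE entry in BIND state: packet type, destination PSE port,
 * source/destination MAC addresses and the matching ethertype
 */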
int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
			  u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
	      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
	      MTK_FOE_IB1_BIND_TTL |
	      MTK_FOE_IB1_BIND_CACHE |
	      MTK_FOE_IB1_BIND_KEEPALIVE;
	entry->ib1 = val;

	val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
	      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
	      FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

	if (is_multicast_ether_addr(dest_mac))
		val |= MTK_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

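/* for HNAPT entries the egress flag selects the translated (post-NAT) tuple;
 * 6RD entries store the IPv4 addresses as tunnel endpoints instead
 */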
int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

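/* DS-Lite entries store the IPv6 addresses as tunnel endpoints; 5-tuple and
 * 6RD entries additionally record the L4 ports
 */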
int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

	return 0;
}

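/* up to two VLAN tags can be offloaded; a DSA special tag (see above) also
 * consumes a VLAN layer
 */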
int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
	case 0:
		entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
			      FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
	    (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
	l2->pppoe_id = sid;

	return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

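/* write an entry into the hardware FOE table at its hash index (or the
 * adjacent slot on collision); entry data is written before ib1 so the
 * hardware never sees a partially initialized bound entry
 */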
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
			 u16 timestamp)
{
	struct mtk_foe_entry *hwe;
	u32 hash;

	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

	hash = mtk_ppe_hash_entry(entry);
	hwe = &ppe->foe_table[hash];
	if (!mtk_foe_entry_usable(hwe)) {
		hwe++;
		hash++;

		if (!mtk_foe_entry_usable(hwe))
			return -ENOSPC;
	}

	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);

	return hash;
}

int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
		 int version)
{
	struct mtk_foe_entry *foe;

	/* need to allocate a separate device, since PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return -ENOMEM;

	ppe->foe_table = foe;

	mtk_ppe_debugfs_init(ppe);

	return 0;
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
		for (k = 0; k < ARRAY_SIZE(skip); k++)
			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}

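/* program the table base and the lookup, aging and binding configuration,
 * then enable the offload engine
 */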
int mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DUP_CPU) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
	      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
	      MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_L2_BRIDGE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

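	/* aging timeouts: unbound entries, then UDP/non-L4 and TCP/TCP-FIN
	 * bound entries
	 */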
	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

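	/* table occupancy thresholds and rate limits for creating new bindings */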
	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	return 0;
}

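/* invalidate all entries, then disable the offload engine and aging so the
 * hardware stops updating the table
 */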
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	for (i = 0; i < MTK_PPE_ENTRIES; i++)
		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
						   MTK_FOE_STATE_INVALID);

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}