1/*
2 * B53 switch driver main logic
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/delay.h>
23#include <linux/export.h>
24#include <linux/gpio.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/platform_data/b53.h>
28#include <linux/phy.h>
29#include <linux/phylink.h>
30#include <linux/etherdevice.h>
31#include <linux/if_bridge.h>
32#include <net/dsa.h>
33
34#include "b53_regs.h"
35#include "b53_priv.h"
36
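/* One MIB counter descriptor: the counter width in bytes (4 or 8), its byte
 * offset within the per-port MIB page, and the name reported through ethtool.
 */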
37struct b53_mib_desc {
38 u8 size;
39 u8 offset;
40 const char *name;
41};
42
43/* BCM5365 MIB counters */
44static const struct b53_mib_desc b53_mibs_65[] = {
45 { 8, 0x00, "TxOctets" },
46 { 4, 0x08, "TxDropPkts" },
47 { 4, 0x10, "TxBroadcastPkts" },
48 { 4, 0x14, "TxMulticastPkts" },
49 { 4, 0x18, "TxUnicastPkts" },
50 { 4, 0x1c, "TxCollisions" },
51 { 4, 0x20, "TxSingleCollision" },
52 { 4, 0x24, "TxMultipleCollision" },
53 { 4, 0x28, "TxDeferredTransmit" },
54 { 4, 0x2c, "TxLateCollision" },
55 { 4, 0x30, "TxExcessiveCollision" },
56 { 4, 0x38, "TxPausePkts" },
57 { 8, 0x44, "RxOctets" },
58 { 4, 0x4c, "RxUndersizePkts" },
59 { 4, 0x50, "RxPausePkts" },
60 { 4, 0x54, "Pkts64Octets" },
61 { 4, 0x58, "Pkts65to127Octets" },
62 { 4, 0x5c, "Pkts128to255Octets" },
63 { 4, 0x60, "Pkts256to511Octets" },
64 { 4, 0x64, "Pkts512to1023Octets" },
65 { 4, 0x68, "Pkts1024to1522Octets" },
66 { 4, 0x6c, "RxOversizePkts" },
67 { 4, 0x70, "RxJabbers" },
68 { 4, 0x74, "RxAlignmentErrors" },
69 { 4, 0x78, "RxFCSErrors" },
70 { 8, 0x7c, "RxGoodOctets" },
71 { 4, 0x84, "RxDropPkts" },
72 { 4, 0x88, "RxUnicastPkts" },
73 { 4, 0x8c, "RxMulticastPkts" },
74 { 4, 0x90, "RxBroadcastPkts" },
75 { 4, 0x94, "RxSAChanges" },
76 { 4, 0x98, "RxFragments" },
77};
78
79#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
80
81/* BCM63xx MIB counters */
82static const struct b53_mib_desc b53_mibs_63xx[] = {
83 { 8, 0x00, "TxOctets" },
84 { 4, 0x08, "TxDropPkts" },
85 { 4, 0x0c, "TxQoSPkts" },
86 { 4, 0x10, "TxBroadcastPkts" },
87 { 4, 0x14, "TxMulticastPkts" },
88 { 4, 0x18, "TxUnicastPkts" },
89 { 4, 0x1c, "TxCollisions" },
90 { 4, 0x20, "TxSingleCollision" },
91 { 4, 0x24, "TxMultipleCollision" },
92 { 4, 0x28, "TxDeferredTransmit" },
93 { 4, 0x2c, "TxLateCollision" },
94 { 4, 0x30, "TxExcessiveCollision" },
95 { 4, 0x38, "TxPausePkts" },
96 { 8, 0x3c, "TxQoSOctets" },
97 { 8, 0x44, "RxOctets" },
98 { 4, 0x4c, "RxUndersizePkts" },
99 { 4, 0x50, "RxPausePkts" },
100 { 4, 0x54, "Pkts64Octets" },
101 { 4, 0x58, "Pkts65to127Octets" },
102 { 4, 0x5c, "Pkts128to255Octets" },
103 { 4, 0x60, "Pkts256to511Octets" },
104 { 4, 0x64, "Pkts512to1023Octets" },
105 { 4, 0x68, "Pkts1024to1522Octets" },
106 { 4, 0x6c, "RxOversizePkts" },
107 { 4, 0x70, "RxJabbers" },
108 { 4, 0x74, "RxAlignmentErrors" },
109 { 4, 0x78, "RxFCSErrors" },
110 { 8, 0x7c, "RxGoodOctets" },
111 { 4, 0x84, "RxDropPkts" },
112 { 4, 0x88, "RxUnicastPkts" },
113 { 4, 0x8c, "RxMulticastPkts" },
114 { 4, 0x90, "RxBroadcastPkts" },
115 { 4, 0x94, "RxSAChanges" },
116 { 4, 0x98, "RxFragments" },
117 { 4, 0xa0, "RxSymbolErrors" },
118 { 4, 0xa4, "RxQoSPkts" },
119 { 8, 0xa8, "RxQoSOctets" },
120 { 4, 0xb0, "Pkts1523to2047Octets" },
121 { 4, 0xb4, "Pkts2048to4095Octets" },
122 { 4, 0xb8, "Pkts4096to8191Octets" },
123 { 4, 0xbc, "Pkts8192to9728Octets" },
124 { 4, 0xc0, "RxDiscarded" },
125};
126
127#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
128
129/* MIB counters */
130static const struct b53_mib_desc b53_mibs[] = {
131 { 8, 0x00, "TxOctets" },
132 { 4, 0x08, "TxDropPkts" },
133 { 4, 0x10, "TxBroadcastPkts" },
134 { 4, 0x14, "TxMulticastPkts" },
135 { 4, 0x18, "TxUnicastPkts" },
136 { 4, 0x1c, "TxCollisions" },
137 { 4, 0x20, "TxSingleCollision" },
138 { 4, 0x24, "TxMultipleCollision" },
139 { 4, 0x28, "TxDeferredTransmit" },
140 { 4, 0x2c, "TxLateCollision" },
141 { 4, 0x30, "TxExcessiveCollision" },
142 { 4, 0x38, "TxPausePkts" },
143 { 8, 0x50, "RxOctets" },
144 { 4, 0x58, "RxUndersizePkts" },
145 { 4, 0x5c, "RxPausePkts" },
146 { 4, 0x60, "Pkts64Octets" },
147 { 4, 0x64, "Pkts65to127Octets" },
148 { 4, 0x68, "Pkts128to255Octets" },
149 { 4, 0x6c, "Pkts256to511Octets" },
150 { 4, 0x70, "Pkts512to1023Octets" },
151 { 4, 0x74, "Pkts1024to1522Octets" },
152 { 4, 0x78, "RxOversizePkts" },
153 { 4, 0x7c, "RxJabbers" },
154 { 4, 0x80, "RxAlignmentErrors" },
155 { 4, 0x84, "RxFCSErrors" },
156 { 8, 0x88, "RxGoodOctets" },
157 { 4, 0x90, "RxDropPkts" },
158 { 4, 0x94, "RxUnicastPkts" },
159 { 4, 0x98, "RxMulticastPkts" },
160 { 4, 0x9c, "RxBroadcastPkts" },
161 { 4, 0xa0, "RxSAChanges" },
162 { 4, 0xa4, "RxFragments" },
163 { 4, 0xa8, "RxJumboPkts" },
164 { 4, 0xac, "RxSymbolErrors" },
165 { 4, 0xc0, "RxDiscarded" },
166};
167
168#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
169
170static const struct b53_mib_desc b53_mibs_58xx[] = {
171 { 8, 0x00, "TxOctets" },
172 { 4, 0x08, "TxDropPkts" },
173 { 4, 0x0c, "TxQPKTQ0" },
174 { 4, 0x10, "TxBroadcastPkts" },
175 { 4, 0x14, "TxMulticastPkts" },
176 { 4, 0x18, "TxUnicastPKts" },
177 { 4, 0x1c, "TxCollisions" },
178 { 4, 0x20, "TxSingleCollision" },
179 { 4, 0x24, "TxMultipleCollision" },
180 { 4, 0x28, "TxDeferredCollision" },
181 { 4, 0x2c, "TxLateCollision" },
182 { 4, 0x30, "TxExcessiveCollision" },
183 { 4, 0x34, "TxFrameInDisc" },
184 { 4, 0x38, "TxPausePkts" },
185 { 4, 0x3c, "TxQPKTQ1" },
186 { 4, 0x40, "TxQPKTQ2" },
187 { 4, 0x44, "TxQPKTQ3" },
188 { 4, 0x48, "TxQPKTQ4" },
189 { 4, 0x4c, "TxQPKTQ5" },
190 { 8, 0x50, "RxOctets" },
191 { 4, 0x58, "RxUndersizePkts" },
192 { 4, 0x5c, "RxPausePkts" },
193 { 4, 0x60, "RxPkts64Octets" },
194 { 4, 0x64, "RxPkts65to127Octets" },
195 { 4, 0x68, "RxPkts128to255Octets" },
196 { 4, 0x6c, "RxPkts256to511Octets" },
197 { 4, 0x70, "RxPkts512to1023Octets" },
198 { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
199 { 4, 0x78, "RxOversizePkts" },
200 { 4, 0x7c, "RxJabbers" },
201 { 4, 0x80, "RxAlignmentErrors" },
202 { 4, 0x84, "RxFCSErrors" },
203 { 8, 0x88, "RxGoodOctets" },
204 { 4, 0x90, "RxDropPkts" },
205 { 4, 0x94, "RxUnicastPkts" },
206 { 4, 0x98, "RxMulticastPkts" },
207 { 4, 0x9c, "RxBroadcastPkts" },
208 { 4, 0xa0, "RxSAChanges" },
209 { 4, 0xa4, "RxFragments" },
210 { 4, 0xa8, "RxJumboPkt" },
211 { 4, 0xac, "RxSymblErr" },
212 { 4, 0xb0, "InRangeErrCount" },
213 { 4, 0xb4, "OutRangeErrCount" },
214 { 4, 0xb8, "EEELpiEvent" },
215 { 4, 0xbc, "EEELpiDuration" },
216 { 4, 0xc0, "RxDiscard" },
217 { 4, 0xc8, "TxQPKTQ6" },
218 { 4, 0xcc, "TxQPKTQ7" },
219 { 4, 0xd0, "TxPkts64Octets" },
220 { 4, 0xd4, "TxPkts65to127Octets" },
221 { 4, 0xd8, "TxPkts128to255Octets" },
222 { 4, 0xdc, "TxPkts256to511Ocets" },
223 { 4, 0xe0, "TxPkts512to1023Ocets" },
224 { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
225};
226
227#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
228
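/* Start a VLAN table access (VTA) operation and poll for the hardware to
 * clear the start bit; returns 0 on completion or -EIO on timeout.
 */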
229static int b53_do_vlan_op(struct b53_device *dev, u8 op)
230{
231 unsigned int i;
232
233 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
234
235 for (i = 0; i < 10; i++) {
236 u8 vta;
237
238 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
239 if (!(vta & VTA_START_CMD))
240 return 0;
241
242 usleep_range(100, 200);
243 }
244
245 return -EIO;
246}
247
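/* Write one VLAN table entry. 5325 and 5365 use their dedicated VLAN page
 * registers; other devices go through the VTA read/write registers.
 */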
248static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
249 struct b53_vlan *vlan)
250{
251 if (is5325(dev)) {
252 u32 entry = 0;
253
254 if (vlan->members) {
255 entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
256 VA_UNTAG_S_25) | vlan->members;
257 if (dev->core_rev >= 3)
258 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
259 else
260 entry |= VA_VALID_25;
261 }
262
263 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
264 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
265 VTA_RW_STATE_WR | VTA_RW_OP_EN);
266 } else if (is5365(dev)) {
267 u16 entry = 0;
268
269 if (vlan->members)
270 entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
271 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
272
273 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
274 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
275 VTA_RW_STATE_WR | VTA_RW_OP_EN);
276 } else {
277 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
278 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
279 (vlan->untag << VTE_UNTAG_S) | vlan->members);
280
281 b53_do_vlan_op(dev, VTA_CMD_WRITE);
282 }
283
284 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
285 vid, vlan->members, vlan->untag);
286}
287
288static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
289 struct b53_vlan *vlan)
290{
291 if (is5325(dev)) {
292 u32 entry = 0;
293
294 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
295 VTA_RW_STATE_RD | VTA_RW_OP_EN);
296 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
297
298 if (dev->core_rev >= 3)
299 vlan->valid = !!(entry & VA_VALID_25_R4);
300 else
301 vlan->valid = !!(entry & VA_VALID_25);
302 vlan->members = entry & VA_MEMBER_MASK;
303 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
304
305 } else if (is5365(dev)) {
306 u16 entry = 0;
307
308 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
309 VTA_RW_STATE_WR | VTA_RW_OP_EN);
310 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
311
312 vlan->valid = !!(entry & VA_VALID_65);
313 vlan->members = entry & VA_MEMBER_MASK;
314 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
315 } else {
316 u32 entry = 0;
317
318 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
319 b53_do_vlan_op(dev, VTA_CMD_READ);
320 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
321 vlan->members = entry & VTE_MEMBERS;
322 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
323 vlan->valid = true;
324 }
325}
326
327static void b53_set_forwarding(struct b53_device *dev, int enable)
328{
329 u8 mgmt;
330
331 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
332
333 if (enable)
334 mgmt |= SM_SW_FWD_EN;
335 else
336 mgmt &= ~SM_SW_FWD_EN;
337
338 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
339
340 /* Include IMP port in dumb forwarding mode
341 */
342 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
343 mgmt |= B53_MII_DUMB_FWDG_EN;
344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
345
346 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
347 * frames should be flooded or not.
348 */
349 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
350 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
351 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
352}
353
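/* Globally enable or disable 802.1Q operation and select how ingress VID
 * violations and VLAN table misses are handled, depending on whether VLAN
 * filtering is requested.
 */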
354static void b53_enable_vlan(struct b53_device *dev, bool enable,
355 bool enable_filtering)
356{
357 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
358
359 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
360 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
361 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
362
363 if (is5325(dev) || is5365(dev)) {
364 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
365 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
366 } else if (is63xx(dev)) {
367 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
368 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
369 } else {
370 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
371 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
372 }
373
374 if (enable) {
375 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
376 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
377 vc4 &= ~VC4_ING_VID_CHECK_MASK;
378 if (enable_filtering) {
379 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
380 vc5 |= VC5_DROP_VTABLE_MISS;
381 } else {
382 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
383 vc5 &= ~VC5_DROP_VTABLE_MISS;
384 }
385
386 if (is5325(dev))
387 vc0 &= ~VC0_RESERVED_1;
388
389 if (is5325(dev) || is5365(dev))
390 vc1 |= VC1_RX_MCST_TAG_EN;
391
392 } else {
393 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
394 vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
395 vc4 &= ~VC4_ING_VID_CHECK_MASK;
396 vc5 &= ~VC5_DROP_VTABLE_MISS;
397
398 if (is5325(dev) || is5365(dev))
399 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
400 else
401 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
402
403 if (is5325(dev) || is5365(dev))
404 vc1 &= ~VC1_RX_MCST_TAG_EN;
405 }
406
407 if (!is5325(dev) && !is5365(dev))
408 vc5 &= ~VC5_VID_FFF_EN;
409
410 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
411 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
412
413 if (is5325(dev) || is5365(dev)) {
414 /* enable the high 8 bit vid check on 5325 */
415 if (is5325(dev) && enable)
416 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
417 VC3_HIGH_8BIT_EN);
418 else
419 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
420
421 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
422 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
423 } else if (is63xx(dev)) {
424 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
425 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
426 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
427 } else {
428 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
429 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
430 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
431 }
432
433 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
434
435 dev->vlan_enabled = enable;
436}
437
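/* Enable or disable jumbo frames on all enabled ports and program the
 * maximum frame size; not available on 5325/5365.
 */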
438static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
439{
440 u32 port_mask = 0;
441 u16 max_size = JMS_MIN_SIZE;
442
443 if (is5325(dev) || is5365(dev))
444 return -EINVAL;
445
446 if (enable) {
447 port_mask = dev->enabled_ports;
448 max_size = JMS_MAX_SIZE;
449 if (allow_10_100)
450 port_mask |= JPM_10_100_JUMBO_EN;
451 }
452
453 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
454 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
455}
456
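/* Trigger a fast-age flush of the ARL for the given scope (port, VLAN or
 * static entries) and wait for completion, then restore ageing of dynamic
 * entries only.
 */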
457static int b53_flush_arl(struct b53_device *dev, u8 mask)
458{
459 unsigned int i;
460
461 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
462 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
463
464 for (i = 0; i < 10; i++) {
465 u8 fast_age_ctrl;
466
467 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
468 &fast_age_ctrl);
469
470 if (!(fast_age_ctrl & FAST_AGE_DONE))
471 goto out;
472
473 msleep(1);
474 }
475
476 return -ETIMEDOUT;
477out:
478 /* Only age dynamic entries (default behavior) */
479 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
480 return 0;
481}
482
483static int b53_fast_age_port(struct b53_device *dev, int port)
484{
485 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
486
487 return b53_flush_arl(dev, FAST_AGE_PORT);
488}
489
490static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
491{
492 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
493
494 return b53_flush_arl(dev, FAST_AGE_VLAN);
495}
496
497void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
498{
499 struct b53_device *dev = ds->priv;
500 unsigned int i;
501 u16 pvlan;
502
503 /* Enable the IMP port to be in the same VLAN as the other ports
504 * on a per-port basis such that we only have Port i and IMP in
505 * the same VLAN.
506 */
507 b53_for_each_port(dev, i) {
508 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
509 pvlan |= BIT(cpu_port);
510 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
511 }
512}
513EXPORT_SYMBOL(b53_imp_vlan_setup);
514
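/* Address learning is controlled through a per-port disable mask: clearing
 * a port's bit enables learning, setting it disables learning.
 */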
515static void b53_port_set_learning(struct b53_device *dev, int port,
516 bool learning)
517{
518 u16 reg;
519
520 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
521 if (learning)
522 reg &= ~BIT(port);
523 else
524 reg |= BIT(port);
525 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
526}
527
528int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
529{
530 struct b53_device *dev = ds->priv;
531 unsigned int cpu_port;
532 int ret = 0;
533 u16 pvlan;
534
535 if (!dsa_is_user_port(ds, port))
536 return 0;
537
538 cpu_port = ds->ports[port].cpu_dp->index;
539
540 b53_br_egress_floods(ds, port, true, true);
541 b53_port_set_learning(dev, port, false);
542
543 if (dev->ops->irq_enable)
544 ret = dev->ops->irq_enable(dev, port);
545 if (ret)
546 return ret;
547
548 /* Clear the Rx and Tx disable bits and set to no spanning tree */
549 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
550
 551	/* Set this port, and only this one, to be in the default VLAN;
 552	 * if it is a member of a bridge, restore the membership it had
 553	 * prior to this port being brought down.
554 */
555 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
556 pvlan &= ~0x1ff;
557 pvlan |= BIT(port);
558 pvlan |= dev->ports[port].vlan_ctl_mask;
559 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
560
561 b53_imp_vlan_setup(ds, cpu_port);
562
563 /* If EEE was enabled, restore it */
564 if (dev->ports[port].eee.eee_enabled)
565 b53_eee_enable_set(ds, port, true);
566
567 return 0;
568}
569EXPORT_SYMBOL(b53_enable_port);
570
571void b53_disable_port(struct dsa_switch *ds, int port)
572{
573 struct b53_device *dev = ds->priv;
574 u8 reg;
575
576 /* Disable Tx/Rx for the port */
577 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
578 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
579 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
580
581 if (dev->ops->irq_disable)
582 dev->ops->irq_disable(dev, port);
583}
584EXPORT_SYMBOL(b53_disable_port);
585
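/* Configure Broadcom tag support on an IMP-capable port (5, 7 or 8): switch
 * between managed and unmanaged forwarding mode, select the frame management
 * port, and, on 58xx devices, enable or disable tag reception/transmission
 * for that port.
 */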
586void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
587{
588 struct b53_device *dev = ds->priv;
589 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
590 u8 hdr_ctl, val;
591 u16 reg;
592
593 /* Resolve which bit controls the Broadcom tag */
594 switch (port) {
595 case 8:
596 val = BRCM_HDR_P8_EN;
597 break;
598 case 7:
599 val = BRCM_HDR_P7_EN;
600 break;
601 case 5:
602 val = BRCM_HDR_P5_EN;
603 break;
604 default:
605 val = 0;
606 break;
607 }
608
609 /* Enable management mode if tagging is requested */
610 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
611 if (tag_en)
612 hdr_ctl |= SM_SW_FWD_MODE;
613 else
614 hdr_ctl &= ~SM_SW_FWD_MODE;
615 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
616
617 /* Configure the appropriate IMP port */
618 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
619 if (port == 8)
620 hdr_ctl |= GC_FRM_MGMT_PORT_MII;
621 else if (port == 5)
622 hdr_ctl |= GC_FRM_MGMT_PORT_M;
623 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
624
625 /* Enable Broadcom tags for IMP port */
626 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
627 if (tag_en)
628 hdr_ctl |= val;
629 else
630 hdr_ctl &= ~val;
631 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
632
633 /* Registers below are only accessible on newer devices */
634 if (!is58xx(dev))
635 return;
636
 637	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
 638	 * allow us to tag outgoing frames
639 */
640 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
641 if (tag_en)
642 reg &= ~BIT(port);
643 else
644 reg |= BIT(port);
645 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
646
647 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
648 * allow delivering frames to the per-port net_devices
649 */
650 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
651 if (tag_en)
652 reg &= ~BIT(port);
653 else
654 reg |= BIT(port);
655 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
656}
657EXPORT_SYMBOL(b53_brcm_hdr_setup);
658
659static void b53_enable_cpu_port(struct b53_device *dev, int port)
660{
661 u8 port_ctrl;
662
663 /* BCM5325 CPU port is at 8 */
664 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
665 port = B53_CPU_PORT;
666
667 port_ctrl = PORT_CTRL_RX_BCST_EN |
668 PORT_CTRL_RX_MCST_EN |
669 PORT_CTRL_RX_UCST_EN;
670 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
671
672 b53_brcm_hdr_setup(dev->ds, port);
673
674 b53_br_egress_floods(dev->ds, port, true, true);
675 b53_port_set_learning(dev, port, false);
676}
677
678static void b53_enable_mib(struct b53_device *dev)
679{
680 u8 gc;
681
682 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
683 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
684 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
685}
686
687static u16 b53_default_pvid(struct b53_device *dev)
688{
689 if (is5325(dev) || is5365(dev))
690 return 1;
691 else
692 return 0;
693}
694
695int b53_configure_vlan(struct dsa_switch *ds)
696{
697 struct b53_device *dev = ds->priv;
698 struct b53_vlan vl = { 0 };
699 struct b53_vlan *v;
700 int i, def_vid;
701 u16 vid;
702
703 def_vid = b53_default_pvid(dev);
704
705 /* clear all vlan entries */
706 if (is5325(dev) || is5365(dev)) {
707 for (i = def_vid; i < dev->num_vlans; i++)
708 b53_set_vlan_entry(dev, i, &vl);
709 } else {
710 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
711 }
712
713 b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
714
715 b53_for_each_port(dev, i)
716 b53_write16(dev, B53_VLAN_PAGE,
717 B53_VLAN_PORT_DEF_TAG(i), def_vid);
718
719 if (!is5325(dev) && !is5365(dev))
720 b53_set_jumbo(dev, dev->enable_jumbo, false);
721
722 /* Upon initial call we have not set-up any VLANs, but upon
723 * system resume, we need to restore all VLAN entries.
724 */
725 for (vid = def_vid; vid < dev->num_vlans; vid++) {
726 v = &dev->vlans[vid];
727
728 if (!v->members)
729 continue;
730
731 b53_set_vlan_entry(dev, vid, v);
732 b53_fast_age_vlan(dev, vid);
733 }
734
735 return 0;
736}
737EXPORT_SYMBOL(b53_configure_vlan);
738
739static void b53_switch_reset_gpio(struct b53_device *dev)
740{
741 int gpio = dev->reset_gpio;
742
743 if (gpio < 0)
744 return;
745
746 /* Reset sequence: RESET low(50ms)->high(20ms)
747 */
748 gpio_set_value(gpio, 0);
749 mdelay(50);
750
751 gpio_set_value(gpio, 1);
752 mdelay(20);
753
754 dev->current_page = 0xff;
755}
756
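/* Perform a full switch reset: optional GPIO reset pulse, chip-specific
 * software reset, re-enabling of frame forwarding if it was disabled,
 * MIB counter enabling and a flush of static ARL entries.
 */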
757static int b53_switch_reset(struct b53_device *dev)
758{
759 unsigned int timeout = 1000;
760 u8 mgmt, reg;
761
762 b53_switch_reset_gpio(dev);
763
764 if (is539x(dev)) {
765 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
766 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
767 }
768
769 /* This is specific to 58xx devices here, do not use is58xx() which
 770	 * covers the larger Starfighter 2 family, including 7445/7278 which
771 * still use this driver as a library and need to perform the reset
772 * earlier.
773 */
774 if (dev->chip_id == BCM58XX_DEVICE_ID ||
775 dev->chip_id == BCM583XX_DEVICE_ID) {
776 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
777 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
778 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
779
780 do {
781 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
782 if (!(reg & SW_RST))
783 break;
784
785 usleep_range(1000, 2000);
786 } while (timeout-- > 0);
787
788 if (timeout == 0)
789 return -ETIMEDOUT;
790 }
791
792 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
793
794 if (!(mgmt & SM_SW_FWD_EN)) {
795 mgmt &= ~SM_SW_FWD_MODE;
796 mgmt |= SM_SW_FWD_EN;
797
798 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
799 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
800
801 if (!(mgmt & SM_SW_FWD_EN)) {
802 dev_err(dev->dev, "Failed to enable switch!\n");
803 return -EINVAL;
804 }
805 }
806
807 b53_enable_mib(dev);
808
809 return b53_flush_arl(dev, FAST_AGE_STATIC);
810}
811
812static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
813{
814 struct b53_device *priv = ds->priv;
815 u16 value = 0;
816 int ret;
817
818 if (priv->ops->phy_read16)
819 ret = priv->ops->phy_read16(priv, addr, reg, &value);
820 else
821 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
822 reg * 2, &value);
823
824 return ret ? ret : value;
825}
826
827static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
828{
829 struct b53_device *priv = ds->priv;
830
831 if (priv->ops->phy_write16)
832 return priv->ops->phy_write16(priv, addr, reg, val);
833
834 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
835}
836
837static int b53_reset_switch(struct b53_device *priv)
838{
839 /* reset vlans */
840 priv->enable_jumbo = false;
841
842 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
843 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
844
845 priv->serdes_lane = B53_INVALID_LANE;
846
847 return b53_switch_reset(priv);
848}
849
850static int b53_apply_config(struct b53_device *priv)
851{
852 /* disable switching */
853 b53_set_forwarding(priv, 0);
854
855 b53_configure_vlan(priv->ds);
856
857 /* enable switching */
858 b53_set_forwarding(priv, 1);
859
860 return 0;
861}
862
863static void b53_reset_mib(struct b53_device *priv)
864{
865 u8 gc;
866
867 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
868
869 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
870 msleep(1);
871 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
872 msleep(1);
873}
874
875static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
876{
877 if (is5365(dev))
878 return b53_mibs_65;
879 else if (is63xx(dev))
880 return b53_mibs_63xx;
881 else if (is58xx(dev))
882 return b53_mibs_58xx;
883 else
884 return b53_mibs;
885}
886
887static unsigned int b53_get_mib_size(struct b53_device *dev)
888{
889 if (is5365(dev))
890 return B53_MIBS_65_SIZE;
891 else if (is63xx(dev))
892 return B53_MIBS_63XX_SIZE;
893 else if (is58xx(dev))
894 return B53_MIBS_58XX_SIZE;
895 else
896 return B53_MIBS_SIZE;
897}
898
899static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
900{
901 /* These ports typically do not have built-in PHYs */
902 switch (port) {
903 case B53_CPU_PORT_25:
904 case 7:
905 case B53_CPU_PORT:
906 return NULL;
907 }
908
909 return mdiobus_get_phy(ds->slave_mii_bus, port);
910}
911
912void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
913 uint8_t *data)
914{
915 struct b53_device *dev = ds->priv;
916 const struct b53_mib_desc *mibs = b53_get_mib(dev);
917 unsigned int mib_size = b53_get_mib_size(dev);
918 struct phy_device *phydev;
919 unsigned int i;
920
921 if (stringset == ETH_SS_STATS) {
922 for (i = 0; i < mib_size; i++)
923 strlcpy(data + i * ETH_GSTRING_LEN,
924 mibs[i].name, ETH_GSTRING_LEN);
925 } else if (stringset == ETH_SS_PHY_STATS) {
926 phydev = b53_get_phy_device(ds, port);
927 if (!phydev)
928 return;
929
930 phy_ethtool_get_strings(phydev, data);
931 }
932}
933EXPORT_SYMBOL(b53_get_strings);
934
935void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
936{
937 struct b53_device *dev = ds->priv;
938 const struct b53_mib_desc *mibs = b53_get_mib(dev);
939 unsigned int mib_size = b53_get_mib_size(dev);
940 const struct b53_mib_desc *s;
941 unsigned int i;
942 u64 val = 0;
943
944 if (is5365(dev) && port == 5)
945 port = 8;
946
947 mutex_lock(&dev->stats_mutex);
948
949 for (i = 0; i < mib_size; i++) {
950 s = &mibs[i];
951
952 if (s->size == 8) {
953 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
954 } else {
955 u32 val32;
956
957 b53_read32(dev, B53_MIB_PAGE(port), s->offset,
958 &val32);
959 val = val32;
960 }
961 data[i] = (u64)val;
962 }
963
964 mutex_unlock(&dev->stats_mutex);
965}
966EXPORT_SYMBOL(b53_get_ethtool_stats);
967
968void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
969{
970 struct phy_device *phydev;
971
972 phydev = b53_get_phy_device(ds, port);
973 if (!phydev)
974 return;
975
976 phy_ethtool_get_stats(phydev, NULL, data);
977}
978EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
979
980int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
981{
982 struct b53_device *dev = ds->priv;
983 struct phy_device *phydev;
984
985 if (sset == ETH_SS_STATS) {
986 return b53_get_mib_size(dev);
987 } else if (sset == ETH_SS_PHY_STATS) {
988 phydev = b53_get_phy_device(ds, port);
989 if (!phydev)
990 return 0;
991
992 return phy_ethtool_get_sset_count(phydev);
993 }
994
995 return 0;
996}
997EXPORT_SYMBOL(b53_get_sset_count);
998
999static int b53_setup(struct dsa_switch *ds)
1000{
1001 struct b53_device *dev = ds->priv;
1002 unsigned int port;
1003 int ret;
1004
1005 ret = b53_reset_switch(dev);
1006 if (ret) {
1007 dev_err(ds->dev, "failed to reset switch\n");
1008 return ret;
1009 }
1010
1011 b53_reset_mib(dev);
1012
1013 ret = b53_apply_config(dev);
1014 if (ret)
1015 dev_err(ds->dev, "failed to apply configuration\n");
1016
1017 /* Configure IMP/CPU port, disable all other ports. Enabled
1018 * ports will be configured with .port_enable
1019 */
1020 for (port = 0; port < dev->num_ports; port++) {
1021 if (dsa_is_cpu_port(ds, port))
1022 b53_enable_cpu_port(dev, port);
1023 else
1024 b53_disable_port(ds, port);
1025 }
1026
1027 return ret;
1028}
1029
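/* Force the link state of a port through the port override register
 * (B53_PORT_OVERRIDE_CTRL for the CPU port, the per-port GMII override
 * register otherwise).
 */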
1030static void b53_force_link(struct b53_device *dev, int port, int link)
1031{
1032 u8 reg, val, off;
1033
1034 /* Override the port settings */
1035 if (port == dev->cpu_port) {
1036 off = B53_PORT_OVERRIDE_CTRL;
1037 val = PORT_OVERRIDE_EN;
1038 } else {
1039 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1040 val = GMII_PO_EN;
1041 }
1042
1043 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1044 reg |= val;
1045 if (link)
1046 reg |= PORT_OVERRIDE_LINK;
1047 else
1048 reg &= ~PORT_OVERRIDE_LINK;
1049 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1050}
1051
1052static void b53_force_port_config(struct b53_device *dev, int port,
1053 int speed, int duplex, int pause)
1054{
1055 u8 reg, val, off;
1056
1057 /* Override the port settings */
1058 if (port == dev->cpu_port) {
1059 off = B53_PORT_OVERRIDE_CTRL;
1060 val = PORT_OVERRIDE_EN;
1061 } else {
1062 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1063 val = GMII_PO_EN;
1064 }
1065
1066 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1067 reg |= val;
1068 if (duplex == DUPLEX_FULL)
1069 reg |= PORT_OVERRIDE_FULL_DUPLEX;
1070 else
1071 reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1072
1073 switch (speed) {
1074 case 2000:
1075 reg |= PORT_OVERRIDE_SPEED_2000M;
1076 /* fallthrough */
1077 case SPEED_1000:
1078 reg |= PORT_OVERRIDE_SPEED_1000M;
1079 break;
1080 case SPEED_100:
1081 reg |= PORT_OVERRIDE_SPEED_100M;
1082 break;
1083 case SPEED_10:
1084 reg |= PORT_OVERRIDE_SPEED_10M;
1085 break;
1086 default:
1087 dev_err(dev->dev, "unknown speed: %d\n", speed);
1088 return;
1089 }
1090
1091 if (pause & MLO_PAUSE_RX)
1092 reg |= PORT_OVERRIDE_RX_FLOW;
1093 if (pause & MLO_PAUSE_TX)
1094 reg |= PORT_OVERRIDE_TX_FLOW;
1095
1096 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1097}
1098
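/* Fixed-link adjust_link handler: force speed, duplex, pause and link
 * state, then apply RGMII delay and (reverse) MII specific configuration,
 * and finally re-initialize EEE if it was enabled.
 */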
1099static void b53_adjust_link(struct dsa_switch *ds, int port,
1100 struct phy_device *phydev)
1101{
1102 struct b53_device *dev = ds->priv;
1103 struct ethtool_eee *p = &dev->ports[port].eee;
1104 u8 rgmii_ctrl = 0, reg = 0, off;
1105 int pause = 0;
1106
1107 if (!phy_is_pseudo_fixed_link(phydev))
1108 return;
1109
1110 /* Enable flow control on BCM5301x's CPU port */
1111 if (is5301x(dev) && port == dev->cpu_port)
1112 pause = MLO_PAUSE_TXRX_MASK;
1113
1114 if (phydev->pause) {
1115 if (phydev->asym_pause)
1116 pause |= MLO_PAUSE_TX;
1117 pause |= MLO_PAUSE_RX;
1118 }
1119
1120 b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
1121 b53_force_link(dev, port, phydev->link);
1122
1123 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
1124 if (port == 8)
1125 off = B53_RGMII_CTRL_IMP;
1126 else
1127 off = B53_RGMII_CTRL_P(port);
1128
 1129		/* Configure the port RGMII clock delay with the DLL disabled and
1130 * tx_clk aligned timing (restoring to reset defaults)
1131 */
1132 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1133 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
1134 RGMII_CTRL_TIMING_SEL);
1135
1136 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
1137 * sure that we enable the port TX clock internal delay to
1138 * account for this internal delay that is inserted, otherwise
1139 * the switch won't be able to receive correctly.
1140 *
1141 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
 1142		 * any delay on either transmission or reception, so the
 1143		 * BCM53125 must also be configured accordingly to account for
 1144		 * the lack of delay and introduce the delays itself.
1145 *
1146 * The BCM53125 switch has its RX clock and TX clock control
 1147		 * swapped, which is why we modify the TX clock path in
1148 * the "RGMII" case
1149 */
1150 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
1151 rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
1152 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
1153 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
1154 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
1155 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1156
1157 dev_info(ds->dev, "Configured port %d for %s\n", port,
1158 phy_modes(phydev->interface));
1159 }
1160
1161 /* configure MII port if necessary */
1162 if (is5325(dev)) {
1163 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1164 &reg);
1165
1166 /* reverse mii needs to be enabled */
1167 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1168 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1169 reg | PORT_OVERRIDE_RV_MII_25);
1170 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1171 &reg);
1172
1173 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1174 dev_err(ds->dev,
1175 "Failed to enable reverse MII mode\n");
1176 return;
1177 }
1178 }
1179 } else if (is5301x(dev)) {
1180 if (port != dev->cpu_port) {
1181 b53_force_port_config(dev, dev->cpu_port, 2000,
1182 DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
1183 b53_force_link(dev, dev->cpu_port, 1);
1184 }
1185 }
1186
1187 /* Re-negotiate EEE if it was enabled already */
1188 p->eee_enabled = b53_eee_init(ds, port, phydev);
1189}
1190
1191void b53_port_event(struct dsa_switch *ds, int port)
1192{
1193 struct b53_device *dev = ds->priv;
1194 bool link;
1195 u16 sts;
1196
1197 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1198 link = !!(sts & BIT(port));
1199 dsa_port_phylink_mac_change(ds, port, link);
1200}
1201EXPORT_SYMBOL(b53_port_event);
1202
1203void b53_phylink_validate(struct dsa_switch *ds, int port,
1204 unsigned long *supported,
1205 struct phylink_link_state *state)
1206{
1207 struct b53_device *dev = ds->priv;
1208 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1209
1210 if (dev->ops->serdes_phylink_validate)
1211 dev->ops->serdes_phylink_validate(dev, port, mask, state);
1212
1213 /* Allow all the expected bits */
1214 phylink_set(mask, Autoneg);
1215 phylink_set_port_modes(mask);
1216 phylink_set(mask, Pause);
1217 phylink_set(mask, Asym_Pause);
1218
1219 /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
1220 * support Gigabit, including Half duplex.
1221 */
1222 if (state->interface != PHY_INTERFACE_MODE_MII &&
1223 state->interface != PHY_INTERFACE_MODE_REVMII &&
1224 !phy_interface_mode_is_8023z(state->interface) &&
1225 !(is5325(dev) || is5365(dev))) {
1226 phylink_set(mask, 1000baseT_Full);
1227 phylink_set(mask, 1000baseT_Half);
1228 }
1229
1230 if (!phy_interface_mode_is_8023z(state->interface)) {
1231 phylink_set(mask, 10baseT_Half);
1232 phylink_set(mask, 10baseT_Full);
1233 phylink_set(mask, 100baseT_Half);
1234 phylink_set(mask, 100baseT_Full);
1235 }
1236
1237 bitmap_and(supported, supported, mask,
1238 __ETHTOOL_LINK_MODE_MASK_NBITS);
1239 bitmap_and(state->advertising, state->advertising, mask,
1240 __ETHTOOL_LINK_MODE_MASK_NBITS);
1241
1242 phylink_helper_basex_speed(state);
1243}
1244EXPORT_SYMBOL(b53_phylink_validate);
1245
1246int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
1247 struct phylink_link_state *state)
1248{
1249 struct b53_device *dev = ds->priv;
1250 int ret = -EOPNOTSUPP;
1251
1252 if ((phy_interface_mode_is_8023z(state->interface) ||
1253 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1254 dev->ops->serdes_link_state)
1255 ret = dev->ops->serdes_link_state(dev, port, state);
1256
1257 return ret;
1258}
1259EXPORT_SYMBOL(b53_phylink_mac_link_state);
1260
1261void b53_phylink_mac_config(struct dsa_switch *ds, int port,
1262 unsigned int mode,
1263 const struct phylink_link_state *state)
1264{
1265 struct b53_device *dev = ds->priv;
1266
1267 if (mode == MLO_AN_PHY)
1268 return;
1269
1270 if (mode == MLO_AN_FIXED) {
1271 b53_force_port_config(dev, port, state->speed,
1272 state->duplex, state->pause);
1273 return;
1274 }
1275
1276 if ((phy_interface_mode_is_8023z(state->interface) ||
1277 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1278 dev->ops->serdes_config)
1279 dev->ops->serdes_config(dev, port, mode, state);
1280}
1281EXPORT_SYMBOL(b53_phylink_mac_config);
1282
1283void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
1284{
1285 struct b53_device *dev = ds->priv;
1286
1287 if (dev->ops->serdes_an_restart)
1288 dev->ops->serdes_an_restart(dev, port);
1289}
1290EXPORT_SYMBOL(b53_phylink_mac_an_restart);
1291
1292void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
1293 unsigned int mode,
1294 phy_interface_t interface)
1295{
1296 struct b53_device *dev = ds->priv;
1297
1298 if (mode == MLO_AN_PHY)
1299 return;
1300
1301 if (mode == MLO_AN_FIXED) {
1302 b53_force_link(dev, port, false);
1303 return;
1304 }
1305
1306 if (phy_interface_mode_is_8023z(interface) &&
1307 dev->ops->serdes_link_set)
1308 dev->ops->serdes_link_set(dev, port, mode, interface, false);
1309}
1310EXPORT_SYMBOL(b53_phylink_mac_link_down);
1311
1312void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
1313 unsigned int mode,
1314 phy_interface_t interface,
1315 struct phy_device *phydev)
1316{
1317 struct b53_device *dev = ds->priv;
1318
1319 if (mode == MLO_AN_PHY)
1320 return;
1321
1322 if (mode == MLO_AN_FIXED) {
1323 b53_force_link(dev, port, true);
1324 return;
1325 }
1326
1327 if (phy_interface_mode_is_8023z(interface) &&
1328 dev->ops->serdes_link_set)
1329 dev->ops->serdes_link_set(dev, port, mode, interface, true);
1330}
1331EXPORT_SYMBOL(b53_phylink_mac_link_up);
1332
1333int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1334{
1335 struct b53_device *dev = ds->priv;
1336 u16 pvid, new_pvid;
1337
1338 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1339 if (!vlan_filtering) {
1340 /* Filtering is currently enabled, use the default PVID since
1341 * the bridge does not expect tagging anymore
1342 */
1343 dev->ports[port].pvid = pvid;
1344 new_pvid = b53_default_pvid(dev);
1345 } else {
1346 /* Filtering is currently disabled, restore the previous PVID */
1347 new_pvid = dev->ports[port].pvid;
1348 }
1349
1350 if (pvid != new_pvid)
1351 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1352 new_pvid);
1353
1354 b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
1355
1356 return 0;
1357}
1358EXPORT_SYMBOL(b53_vlan_filtering);
1359
1360int b53_vlan_prepare(struct dsa_switch *ds, int port,
1361 const struct switchdev_obj_port_vlan *vlan)
1362{
1363 struct b53_device *dev = ds->priv;
1364
1365 if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
1366 return -EOPNOTSUPP;
1367
1368 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
 1369	 * receiving VLAN tagged frames at all; we can still allow the port to
1370 * be configured for egress untagged.
1371 */
1372 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
1373 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1374 return -EINVAL;
1375
1376 if (vlan->vid_end >= dev->num_vlans)
1377 return -ERANGE;
1378
1379 b53_enable_vlan(dev, true, ds->vlan_filtering);
1380
1381 return 0;
1382}
1383EXPORT_SYMBOL(b53_vlan_prepare);
1384
1385void b53_vlan_add(struct dsa_switch *ds, int port,
1386 const struct switchdev_obj_port_vlan *vlan)
1387{
1388 struct b53_device *dev = ds->priv;
1389 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1390 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1391 struct b53_vlan *vl;
1392 u16 vid;
1393
1394 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1395 vl = &dev->vlans[vid];
1396
1397 b53_get_vlan_entry(dev, vid, vl);
1398
1399 if (vid == 0 && vid == b53_default_pvid(dev))
1400 untagged = true;
1401
1402 vl->members |= BIT(port);
1403 if (untagged && !dsa_is_cpu_port(ds, port))
1404 vl->untag |= BIT(port);
1405 else
1406 vl->untag &= ~BIT(port);
1407
1408 b53_set_vlan_entry(dev, vid, vl);
1409 b53_fast_age_vlan(dev, vid);
1410 }
1411
1412 if (pvid && !dsa_is_cpu_port(ds, port)) {
1413 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1414 vlan->vid_end);
1415 b53_fast_age_vlan(dev, vid);
1416 }
1417}
1418EXPORT_SYMBOL(b53_vlan_add);
1419
1420int b53_vlan_del(struct dsa_switch *ds, int port,
1421 const struct switchdev_obj_port_vlan *vlan)
1422{
1423 struct b53_device *dev = ds->priv;
1424 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1425 struct b53_vlan *vl;
1426 u16 vid;
1427 u16 pvid;
1428
1429 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1430
1431 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1432 vl = &dev->vlans[vid];
1433
1434 b53_get_vlan_entry(dev, vid, vl);
1435
1436 vl->members &= ~BIT(port);
1437
1438 if (pvid == vid)
1439 pvid = b53_default_pvid(dev);
1440
1441 if (untagged && !dsa_is_cpu_port(ds, port))
1442 vl->untag &= ~(BIT(port));
1443
1444 b53_set_vlan_entry(dev, vid, vl);
1445 b53_fast_age_vlan(dev, vid);
1446 }
1447
1448 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1449 b53_fast_age_vlan(dev, pvid);
1450
1451 return 0;
1452}
1453EXPORT_SYMBOL(b53_vlan_del);
1454
1455/* Address Resolution Logic routines */
1456static int b53_arl_op_wait(struct b53_device *dev)
1457{
1458 unsigned int timeout = 10;
1459 u8 reg;
1460
1461 do {
1462 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1463 if (!(reg & ARLTBL_START_DONE))
1464 return 0;
1465
1466 usleep_range(1000, 2000);
1467 } while (timeout--);
1468
1469 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1470
1471 return -ETIMEDOUT;
1472}
1473
1474static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1475{
1476 u8 reg;
1477
1478 if (op > ARLTBL_RW)
1479 return -EINVAL;
1480
1481 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1482 reg |= ARLTBL_START_DONE;
1483 if (op)
1484 reg |= ARLTBL_RW;
1485 else
1486 reg &= ~ARLTBL_RW;
1487 if (dev->vlan_enabled)
1488 reg &= ~ARLTBL_IVL_SVL_SELECT;
1489 else
1490 reg |= ARLTBL_IVL_SVL_SELECT;
1491 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1492
1493 return b53_arl_op_wait(dev);
1494}
1495
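/* Scan the ARL bins returned by the previous read operation looking for a
 * matching {MAC, VID}. Returns 0 with *idx set to the matching bin,
 * -ENOENT with *idx set to the first free bin, or -ENOSPC when all bins
 * are occupied by other entries.
 */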
1496static int b53_arl_read(struct b53_device *dev, u64 mac,
1497 u16 vid, struct b53_arl_entry *ent, u8 *idx)
1498{
1499 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
1500 unsigned int i;
1501 int ret;
1502
1503 ret = b53_arl_op_wait(dev);
1504 if (ret)
1505 return ret;
1506
1507 bitmap_zero(free_bins, dev->num_arl_bins);
1508
1509 /* Read the bins */
1510 for (i = 0; i < dev->num_arl_bins; i++) {
1511 u64 mac_vid;
1512 u32 fwd_entry;
1513
1514 b53_read64(dev, B53_ARLIO_PAGE,
1515 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1516 b53_read32(dev, B53_ARLIO_PAGE,
1517 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1518 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1519
1520 if (!(fwd_entry & ARLTBL_VALID)) {
1521 set_bit(i, free_bins);
1522 continue;
1523 }
1524 if ((mac_vid & ARLTBL_MAC_MASK) != mac)
1525 continue;
1526 if (dev->vlan_enabled &&
1527 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
1528 continue;
1529 *idx = i;
1530 return 0;
1531 }
1532
1533 if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
1534 return -ENOSPC;
1535
1536 *idx = find_first_bit(free_bins, dev->num_arl_bins);
1537
1538 return -ENOENT;
1539}
1540
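/* Read-modify-write one ARL entry for {addr, vid}: issue an ARL read,
 * locate a matching or free bin, then (for op == 0) update and commit the
 * entry; a non-zero op performs only the read.
 */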
1541static int b53_arl_op(struct b53_device *dev, int op, int port,
1542 const unsigned char *addr, u16 vid, bool is_valid)
1543{
1544 struct b53_arl_entry ent;
1545 u32 fwd_entry;
1546 u64 mac, mac_vid = 0;
1547 u8 idx = 0;
1548 int ret;
1549
1550 /* Convert the array into a 64-bit MAC */
1551 mac = ether_addr_to_u64(addr);
1552
1553 /* Perform a read for the given MAC and VID */
1554 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1555 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1556
1557 /* Issue a read operation for this MAC */
1558 ret = b53_arl_rw_op(dev, 1);
1559 if (ret)
1560 return ret;
1561
1562 ret = b53_arl_read(dev, mac, vid, &ent, &idx);
1563
1564 /* If this is a read, just finish now */
1565 if (op)
1566 return ret;
1567
1568 switch (ret) {
1569 case -ETIMEDOUT:
1570 return ret;
1571 case -ENOSPC:
1572 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
1573 addr, vid);
1574 return is_valid ? ret : 0;
1575 case -ENOENT:
1576 /* We could not find a matching MAC, so reset to a new entry */
1577 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
1578 addr, vid, idx);
1579 fwd_entry = 0;
1580 break;
1581 default:
1582 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
1583 addr, vid, idx);
1584 break;
1585 }
1586
 1587	/* For a multicast address, the port field is a bitmask and validity
 1588	 * is determined by having at least one port still active
1589 */
1590 if (!is_multicast_ether_addr(addr)) {
1591 ent.port = port;
1592 ent.is_valid = is_valid;
1593 } else {
1594 if (is_valid)
1595 ent.port |= BIT(port);
1596 else
1597 ent.port &= ~BIT(port);
1598
1599 ent.is_valid = !!(ent.port);
1600 }
1601
1602 ent.vid = vid;
1603 ent.is_static = true;
1604 ent.is_age = false;
1605 memcpy(ent.mac, addr, ETH_ALEN);
1606 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1607
1608 b53_write64(dev, B53_ARLIO_PAGE,
1609 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1610 b53_write32(dev, B53_ARLIO_PAGE,
1611 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1612
1613 return b53_arl_rw_op(dev, 0);
1614}
1615
1616int b53_fdb_add(struct dsa_switch *ds, int port,
1617 const unsigned char *addr, u16 vid)
1618{
1619 struct b53_device *priv = ds->priv;
1620
1621 /* 5325 and 5365 require some more massaging, but could
1622 * be supported eventually
1623 */
1624 if (is5325(priv) || is5365(priv))
1625 return -EOPNOTSUPP;
1626
1627 return b53_arl_op(priv, 0, port, addr, vid, true);
1628}
1629EXPORT_SYMBOL(b53_fdb_add);
1630
1631int b53_fdb_del(struct dsa_switch *ds, int port,
1632 const unsigned char *addr, u16 vid)
1633{
1634 struct b53_device *priv = ds->priv;
1635
1636 return b53_arl_op(priv, 0, port, addr, vid, false);
1637}
1638EXPORT_SYMBOL(b53_fdb_del);
1639
1640static int b53_arl_search_wait(struct b53_device *dev)
1641{
1642 unsigned int timeout = 1000;
1643 u8 reg;
1644
1645 do {
1646 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1647 if (!(reg & ARL_SRCH_STDN))
1648 return 0;
1649
1650 if (reg & ARL_SRCH_VLID)
1651 return 0;
1652
1653 usleep_range(1000, 2000);
1654 } while (timeout--);
1655
1656 return -ETIMEDOUT;
1657}
1658
1659static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1660 struct b53_arl_entry *ent)
1661{
1662 u64 mac_vid;
1663 u32 fwd_entry;
1664
1665 b53_read64(dev, B53_ARLIO_PAGE,
1666 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1667 b53_read32(dev, B53_ARLIO_PAGE,
1668 B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1669 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1670}
1671
1672static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
1673 dsa_fdb_dump_cb_t *cb, void *data)
1674{
1675 if (!ent->is_valid)
1676 return 0;
1677
1678 if (port != ent->port)
1679 return 0;
1680
1681 return cb(ent->mac, ent->vid, ent->is_static, data);
1682}
1683
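/* Walk the ARL using the hardware search engine, reporting each valid
 * entry belonging to this port through the DSA dump callback.
 */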
1684int b53_fdb_dump(struct dsa_switch *ds, int port,
1685 dsa_fdb_dump_cb_t *cb, void *data)
1686{
1687 struct b53_device *priv = ds->priv;
1688 struct b53_arl_entry results[2];
1689 unsigned int count = 0;
1690 int ret;
1691 u8 reg;
1692
1693 /* Start search operation */
1694 reg = ARL_SRCH_STDN;
1695 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
1696
1697 do {
1698 ret = b53_arl_search_wait(priv);
1699 if (ret)
1700 return ret;
1701
1702 b53_arl_search_rd(priv, 0, &results[0]);
1703 ret = b53_fdb_copy(port, &results[0], cb, data);
1704 if (ret)
1705 return ret;
1706
1707 if (priv->num_arl_bins > 2) {
1708 b53_arl_search_rd(priv, 1, &results[1]);
1709 ret = b53_fdb_copy(port, &results[1], cb, data);
1710 if (ret)
1711 return ret;
1712
1713 if (!results[0].is_valid && !results[1].is_valid)
1714 break;
1715 }
1716
1717 } while (count++ < b53_max_arl_entries(priv) / 2);
1718
1719 return 0;
1720}
1721EXPORT_SYMBOL(b53_fdb_dump);
1722
1723int b53_mdb_prepare(struct dsa_switch *ds, int port,
1724 const struct switchdev_obj_port_mdb *mdb)
1725{
1726 struct b53_device *priv = ds->priv;
1727
1728 /* 5325 and 5365 require some more massaging, but could
1729 * be supported eventually
1730 */
1731 if (is5325(priv) || is5365(priv))
1732 return -EOPNOTSUPP;
1733
1734 return 0;
1735}
1736EXPORT_SYMBOL(b53_mdb_prepare);
1737
1738void b53_mdb_add(struct dsa_switch *ds, int port,
1739 const struct switchdev_obj_port_mdb *mdb)
1740{
1741 struct b53_device *priv = ds->priv;
1742 int ret;
1743
1744 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
1745 if (ret)
1746 dev_err(ds->dev, "failed to add MDB entry\n");
1747}
1748EXPORT_SYMBOL(b53_mdb_add);
1749
1750int b53_mdb_del(struct dsa_switch *ds, int port,
1751 const struct switchdev_obj_port_mdb *mdb)
1752{
1753 struct b53_device *priv = ds->priv;
1754 int ret;
1755
1756 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
1757 if (ret)
1758 dev_err(ds->dev, "failed to delete MDB entry\n");
1759
1760 return ret;
1761}
1762EXPORT_SYMBOL(b53_mdb_del);
1763
1764int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
1765{
1766 struct b53_device *dev = ds->priv;
1767 s8 cpu_port = ds->ports[port].cpu_dp->index;
1768 u16 pvlan, reg;
1769 unsigned int i;
1770
1771 /* On 7278, port 7 which connects to the ASP should only receive
1772 * traffic from matching CFP rules.
1773 */
1774 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
1775 return -EINVAL;
1776
 1777	/* Make this port leave "join all VLANs" mode since we will have proper
1778 * VLAN entries from now on
1779 */
1780 if (is58xx(dev)) {
1781 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1782 reg &= ~BIT(port);
1783 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
1784 reg &= ~BIT(cpu_port);
1785 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1786 }
1787
1788 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1789
1790 b53_for_each_port(dev, i) {
1791 if (dsa_to_port(ds, i)->bridge_dev != br)
1792 continue;
1793
1794 /* Add this local port to the remote port VLAN control
1795 * membership and update the remote port bitmask
1796 */
1797 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1798 reg |= BIT(port);
1799 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1800 dev->ports[i].vlan_ctl_mask = reg;
1801
1802 pvlan |= BIT(i);
1803 }
1804
1805 /* Configure the local port VLAN control membership to include
1806 * remote ports and update the local port bitmask
1807 */
1808 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1809 dev->ports[port].vlan_ctl_mask = pvlan;
1810
1811 b53_port_set_learning(dev, port, true);
1812
1813 return 0;
1814}
1815EXPORT_SYMBOL(b53_br_join);
1816
1817void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1818{
1819 struct b53_device *dev = ds->priv;
1820 struct b53_vlan *vl = &dev->vlans[0];
1821 s8 cpu_port = ds->ports[port].cpu_dp->index;
1822 unsigned int i;
1823 u16 pvlan, reg, pvid;
1824
1825 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1826
1827 b53_for_each_port(dev, i) {
1828 /* Don't touch the remaining ports */
1829 if (dsa_to_port(ds, i)->bridge_dev != br)
1830 continue;
1831
1832 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1833 reg &= ~BIT(port);
1834 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1835 dev->ports[port].vlan_ctl_mask = reg;
1836
1837 /* Prevent self removal to preserve isolation */
1838 if (port != i)
1839 pvlan &= ~BIT(i);
1840 }
1841
1842 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1843 dev->ports[port].vlan_ctl_mask = pvlan;
1844
1845 pvid = b53_default_pvid(dev);
1846
1847 /* Make this port join all VLANs without VLAN entries */
1848 if (is58xx(dev)) {
1849 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1850 reg |= BIT(port);
1851 if (!(reg & BIT(cpu_port)))
1852 reg |= BIT(cpu_port);
1853 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1854 } else {
1855 b53_get_vlan_entry(dev, pvid, vl);
1856 vl->members |= BIT(port) | BIT(cpu_port);
1857 vl->untag |= BIT(port) | BIT(cpu_port);
1858 b53_set_vlan_entry(dev, pvid, vl);
1859 }
1860 b53_port_set_learning(dev, port, false);
1861}
1862EXPORT_SYMBOL(b53_br_leave);
1863
1864void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
1865{
1866 struct b53_device *dev = ds->priv;
1867 u8 hw_state;
1868 u8 reg;
1869
1870 switch (state) {
1871 case BR_STATE_DISABLED:
1872 hw_state = PORT_CTRL_DIS_STATE;
1873 break;
1874 case BR_STATE_LISTENING:
1875 hw_state = PORT_CTRL_LISTEN_STATE;
1876 break;
1877 case BR_STATE_LEARNING:
1878 hw_state = PORT_CTRL_LEARN_STATE;
1879 break;
1880 case BR_STATE_FORWARDING:
1881 hw_state = PORT_CTRL_FWD_STATE;
1882 break;
1883 case BR_STATE_BLOCKING:
1884 hw_state = PORT_CTRL_BLOCK_STATE;
1885 break;
1886 default:
1887 dev_err(ds->dev, "invalid STP state: %d\n", state);
1888 return;
1889 }
1890
1891 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1892 reg &= ~PORT_CTRL_STP_STATE_MASK;
1893 reg |= hw_state;
1894 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
1895}
1896EXPORT_SYMBOL(b53_br_set_stp_state);
1897
1898void b53_br_fast_age(struct dsa_switch *ds, int port)
1899{
1900 struct b53_device *dev = ds->priv;
1901
1902 if (b53_fast_age_port(dev, port))
1903 dev_err(ds->dev, "fast ageing failed\n");
1904}
1905EXPORT_SYMBOL(b53_br_fast_age);
1906
1907int b53_br_egress_floods(struct dsa_switch *ds, int port,
1908 bool unicast, bool multicast)
1909{
1910 struct b53_device *dev = ds->priv;
1911 u16 uc, mc;
1912
1913 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
1914 if (unicast)
1915 uc |= BIT(port);
1916 else
1917 uc &= ~BIT(port);
1918 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
1919
1920 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
1921 if (multicast)
1922 mc |= BIT(port);
1923 else
1924 mc &= ~BIT(port);
1925 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
1926
1927 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
1928 if (multicast)
1929 mc |= BIT(port);
1930 else
1931 mc &= ~BIT(port);
1932 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
1933
1934 return 0;
1935
1936}
1937EXPORT_SYMBOL(b53_br_egress_floods);
1938
1939static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
1940{
1941 /* Broadcom switches will accept enabling Broadcom tags on the
1942 * following ports: 5, 7 and 8, any other port is not supported
1943 */
1944 switch (port) {
1945 case B53_CPU_PORT_25:
1946 case 7:
1947 case B53_CPU_PORT:
1948 return true;
1949 }
1950
1951 return false;
1952}
1953
1954static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
1955 enum dsa_tag_protocol tag_protocol)
1956{
1957 bool ret = b53_possible_cpu_port(ds, port);
1958
1959 if (!ret) {
1960 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
1961 port);
1962 return ret;
1963 }
1964
1965 switch (tag_protocol) {
1966 case DSA_TAG_PROTO_BRCM:
1967 case DSA_TAG_PROTO_BRCM_PREPEND:
1968 dev_warn(ds->dev,
1969 "Port %d is stacked to Broadcom tag switch\n", port);
1970 ret = false;
1971 break;
1972 default:
1973 ret = true;
1974 break;
1975 }
1976
1977 return ret;
1978}
1979
1980enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
1981 enum dsa_tag_protocol mprot)
1982{
1983 struct b53_device *dev = ds->priv;
1984
 1985	/* Older models (5325, 5365) use a different tag format that we do
1986 * not support in net/dsa/tag_brcm.c yet.
1987 */
1988 if (is5325(dev) || is5365(dev) ||
1989 !b53_can_enable_brcm_tags(ds, port, mprot)) {
1990 dev->tag_protocol = DSA_TAG_PROTO_NONE;
1991 goto out;
1992 }
1993
1994 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
1995 * which requires us to use the prepended Broadcom tag type
1996 */
1997 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
1998 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
1999 goto out;
2000 }
2001
2002 dev->tag_protocol = DSA_TAG_PROTO_BRCM;
2003out:
2004 return dev->tag_protocol;
2005}
2006EXPORT_SYMBOL(b53_get_tag_protocol);
2007
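/* Add @port as a mirror source for the ingress or egress direction and
 * program the capture (destination) port, then enable mirroring globally.
 */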
int b53_mirror_add(struct dsa_switch *ds, int port,
		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
{
	struct b53_device *dev = ds->priv;
	u16 reg, loc;

	if (ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	reg &= ~CAP_PORT_MASK;
	reg |= mirror->to_local_port;
	reg |= MIRROR_EN;
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);

	return 0;
}
EXPORT_SYMBOL(b53_mirror_add);

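/* Remove @port from its mirror direction; if neither the ingress nor the
 * egress control register has any source port left, turn mirroring off
 * entirely.
 */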
void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror)
{
	struct b53_device *dev = ds->priv;
	bool loc_disable = false, other_loc_disable = false;
	u16 reg, loc;

	if (mirror->ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	/* Update the desired ingress/egress register */
	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~BIT(port);
	if (!(reg & MIRROR_MASK))
		loc_disable = true;
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	/* Now look at the other one to know if we can disable mirroring
	 * entirely
	 */
	if (mirror->ingress)
		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
	else
		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
	if (!(reg & MIRROR_MASK))
		other_loc_disable = true;

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	/* Both no longer have ports, let's disable mirroring */
	if (loc_disable && other_loc_disable) {
		reg &= ~MIRROR_EN;
		reg &= ~mirror->to_local_port;
	}
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);

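/* Set or clear the EEE enable bit for @port in the EEE enable control
 * register.
 */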
void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct b53_device *dev = ds->priv;
	u16 reg;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
	if (enable)
		reg |= BIT(port);
	else
		reg &= ~BIT(port);
	b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
EXPORT_SYMBOL(b53_eee_enable_set);

/* Returns 0 if EEE was not enabled, or 1 otherwise */
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	int ret;

	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	b53_eee_enable_set(ds, port, true);

	return 1;
}
EXPORT_SYMBOL(b53_eee_init);

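/* Report EEE status for @port: whether EEE is administratively enabled and
 * whether the port is currently indicating LPI. Not available on BCM5325
 * and BCM5365.
 */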
int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;
	u16 reg;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & BIT(port));

	return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);

int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;
	b53_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}
EXPORT_SYMBOL(b53_set_mac_eee);

static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = b53_setup,
	.get_strings = b53_get_strings,
	.get_ethtool_stats = b53_get_ethtool_stats,
	.get_sset_count = b53_get_sset_count,
	.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
	.phy_read = b53_phy_read16,
	.phy_write = b53_phy_write16,
	.adjust_link = b53_adjust_link,
	.phylink_validate = b53_phylink_validate,
	.phylink_mac_link_state = b53_phylink_mac_link_state,
	.phylink_mac_config = b53_phylink_mac_config,
	.phylink_mac_an_restart = b53_phylink_mac_an_restart,
	.phylink_mac_link_down = b53_phylink_mac_link_down,
	.phylink_mac_link_up = b53_phylink_mac_link_up,
	.port_enable = b53_enable_port,
	.port_disable = b53_disable_port,
	.get_mac_eee = b53_get_mac_eee,
	.set_mac_eee = b53_set_mac_eee,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_egress_floods = b53_br_egress_floods,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_prepare = b53_vlan_prepare,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
	.port_mdb_prepare = b53_mdb_prepare,
	.port_mdb_add = b53_mdb_add,
	.port_mdb_del = b53_mdb_del,
};

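/* Per-chip configuration: device name, number of VLANs, default enabled
 * port mask, CPU port number, VLAN table access registers, ARL geometry
 * and the duplex/jumbo register offsets that differ between generations.
 */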
struct b53_chip_data {
	u32 chip_id;
	const char *dev_name;
	u16 vlans;
	u16 enabled_ports;
	u8 cpu_port;
	u8 vta_regs[3];
	u8 arl_bins;
	u16 arl_buckets;
	u8 duplex_reg;
	u8 jumbo_pm_reg;
	u8 jumbo_size_reg;
};

#define B53_VTA_REGS \
	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }

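/* Table of supported switch models; b53_switch_init() matches the detected
 * chip_id against this table to fill in the device parameters.
 */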
static const struct b53_chip_data b53_switch_chips[] = {
	{
		.chip_id = BCM5325_DEVICE_ID,
		.dev_name = "BCM5325",
		.vlans = 16,
		.enabled_ports = 0x1f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5365_DEVICE_ID,
		.dev_name = "BCM5365",
		.vlans = 256,
		.enabled_ports = 0x1f,
		.arl_bins = 2,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5389_DEVICE_ID,
		.dev_name = "BCM5389",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5395_DEVICE_ID,
		.dev_name = "BCM5395",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5397_DEVICE_ID,
		.dev_name = "BCM5397",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5398_DEVICE_ID,
		.dev_name = "BCM5398",
		.vlans = 4096,
		.enabled_ports = 0x7f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53115_DEVICE_ID,
		.dev_name = "BCM53115",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.vta_regs = B53_VTA_REGS,
		.cpu_port = B53_CPU_PORT,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53125_DEVICE_ID,
		.dev_name = "BCM53125",
		.vlans = 4096,
		.enabled_ports = 0xff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53128_DEVICE_ID,
		.dev_name = "BCM53128",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM63XX_DEVICE_ID,
		.dev_name = "BCM63xx",
		.vlans = 4096,
		.enabled_ports = 0, /* pdata must provide them */
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_63XX,
		.duplex_reg = B53_DUPLEX_STAT_63XX,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
	},
	{
		.chip_id = BCM53010_DEVICE_ID,
		.dev_name = "BCM53010",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53011_DEVICE_ID,
		.dev_name = "BCM53011",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53012_DEVICE_ID,
		.dev_name = "BCM53012",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53018_DEVICE_ID,
		.dev_name = "BCM53018",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53019_DEVICE_ID,
		.dev_name = "BCM53019",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM58XX_DEVICE_ID,
		.dev_name = "BCM585xx/586xx/88312",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM583XX_DEVICE_ID,
		.dev_name = "BCM583xx/11360",
		.vlans = 4096,
		.enabled_ports = 0x103,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7445_DEVICE_ID,
		.dev_name = "BCM7445",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 1024,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7278_DEVICE_ID,
		.dev_name = "BCM7278",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_bins = 4,
		.arl_buckets = 256,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
};

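/* Apply the chip-specific parameters for the detected device, resolve the
 * BCM5325 variant and the BCM53115 strap-dependent CPU port, allocate the
 * per-port and per-VLAN state and request the optional reset GPIO.
 */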
static int b53_switch_init(struct b53_device *dev)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
		const struct b53_chip_data *chip = &b53_switch_chips[i];

		if (chip->chip_id == dev->chip_id) {
			if (!dev->enabled_ports)
				dev->enabled_ports = chip->enabled_ports;
			dev->name = chip->dev_name;
			dev->duplex_reg = chip->duplex_reg;
			dev->vta_regs[0] = chip->vta_regs[0];
			dev->vta_regs[1] = chip->vta_regs[1];
			dev->vta_regs[2] = chip->vta_regs[2];
			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
			dev->cpu_port = chip->cpu_port;
			dev->num_vlans = chip->vlans;
			dev->num_arl_bins = chip->arl_bins;
			dev->num_arl_buckets = chip->arl_buckets;
			break;
		}
	}

	/* check which BCM5325x version we have */
	if (is5325(dev)) {
		u8 vc4;

		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);

		/* check reserved bits */
		switch (vc4 & 3) {
		case 1:
			/* BCM5325E */
			break;
		case 3:
			/* BCM5325F - do not use port 4 */
			dev->enabled_ports &= ~BIT(4);
			break;
		default:
/* On the BCM47XX SoCs this is the supported internal switch. */
#ifndef CONFIG_BCM47XX
			/* BCM5325M */
			return -EINVAL;
#else
			break;
#endif
		}
	} else if (dev->chip_id == BCM53115_DEVICE_ID) {
		u64 strap_value;

		b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
		/* use second IMP port if GMII is enabled */
		if (strap_value & SV_GMII_CTRL_115)
			dev->cpu_port = 5;
	}

	dev->enabled_ports |= BIT(dev->cpu_port);
	dev->num_ports = fls(dev->enabled_ports);

	/* Include non-standard CPU port built-in PHYs to be probed */
	if (is539x(dev) || is531x5(dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
			    !b53_possible_cpu_port(dev->ds, i))
				dev->ds->phys_mii_mask |= BIT(i);
		}
	}

	dev->ports = devm_kcalloc(dev->dev,
				  dev->num_ports, sizeof(struct b53_port),
				  GFP_KERNEL);
	if (!dev->ports)
		return -ENOMEM;

	dev->vlans = devm_kcalloc(dev->dev,
				  dev->num_vlans, sizeof(struct b53_vlan),
				  GFP_KERNEL);
	if (!dev->vlans)
		return -ENOMEM;

	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
	if (dev->reset_gpio >= 0) {
		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
					    GPIOF_OUT_INIT_HIGH, "robo_reset");
		if (ret)
			return ret;
	}

	return 0;
}

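/* Allocate a DSA switch and its b53 private state, wire up the switch ops
 * and the register access ops provided by the caller, and initialize the
 * register and statistics locks.
 */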
struct b53_device *b53_switch_alloc(struct device *base,
				    const struct b53_io_ops *ops,
				    void *priv)
{
	struct dsa_switch *ds;
	struct b53_device *dev;

	ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
	if (!ds)
		return NULL;

	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	ds->priv = dev;
	dev->dev = base;

	dev->ds = ds;
	dev->priv = priv;
	dev->ops = ops;
	ds->ops = &b53_switch_ops;
	/* Let DSA handle the case where multiple bridges span the same switch
	 * device and different VLAN awareness settings are requested, which
	 * would break filtering semantics for any of the other bridge
	 * devices (not supported by the hardware).
	 */
	ds->vlan_filtering_is_global = true;

	mutex_init(&dev->reg_mutex);
	mutex_init(&dev->stats_mutex);

	return dev;
}
EXPORT_SYMBOL(b53_switch_alloc);

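/* Identify the switch model. Newer devices expose an 8-bit or 32-bit device
 * ID register; BCM5325 and BCM5365 lack it, so they are told apart by
 * probing whether the 5325 VLAN table access register is writable.
 */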
int b53_switch_detect(struct b53_device *dev)
{
	u32 id32;
	u16 tmp;
	u8 id8;
	int ret;

	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
	if (ret)
		return ret;

	switch (id8) {
	case 0:
		/* BCM5325 and BCM5365 do not have this register so reads
		 * return 0. But the read operation did succeed, so assume this
		 * is one of them.
		 *
		 * Next check if we can write to the 5325's VTA register; for
		 * 5365 it is read only.
		 */
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);

		if (tmp == 0xf)
			dev->chip_id = BCM5325_DEVICE_ID;
		else
			dev->chip_id = BCM5365_DEVICE_ID;
		break;
	case BCM5389_DEVICE_ID:
	case BCM5395_DEVICE_ID:
	case BCM5397_DEVICE_ID:
	case BCM5398_DEVICE_ID:
		dev->chip_id = id8;
		break;
	default:
		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
		if (ret)
			return ret;

		switch (id32) {
		case BCM53115_DEVICE_ID:
		case BCM53125_DEVICE_ID:
		case BCM53128_DEVICE_ID:
		case BCM53010_DEVICE_ID:
		case BCM53011_DEVICE_ID:
		case BCM53012_DEVICE_ID:
		case BCM53018_DEVICE_ID:
		case BCM53019_DEVICE_ID:
			dev->chip_id = id32;
			break;
		default:
			pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
			       id8, id32);
			return -ENODEV;
		}
	}

	if (dev->chip_id == BCM5325_DEVICE_ID)
		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
				 &dev->core_rev);
	else
		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
				 &dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);

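/* Common registration entry point: take chip ID and enabled ports from
 * platform data if present, otherwise auto-detect the chip, then initialize
 * the device and register it with the DSA core.
 */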
int b53_switch_register(struct b53_device *dev)
{
	int ret;

	if (dev->pdata) {
		dev->chip_id = dev->pdata->chip_id;
		dev->enabled_ports = dev->pdata->enabled_ports;
	}

	if (!dev->chip_id && b53_switch_detect(dev))
		return -EINVAL;

	ret = b53_switch_init(dev);
	if (ret)
		return ret;

	pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);

MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
MODULE_DESCRIPTION("B53 switch library");
MODULE_LICENSE("Dual BSD/GPL");