1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright Gavin Shan, IBM Corporation 2016.
4 */
5
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/netdevice.h>
10#include <linux/skbuff.h>
11
12#include <net/ncsi.h>
13#include <net/net_namespace.h>
14#include <net/sock.h>
15#include <net/addrconf.h>
16#include <net/ipv6.h>
17#include <net/genetlink.h>
18
19#include "internal.h"
20#include "ncsi-pkt.h"
21#include "ncsi-netlink.h"
22
23LIST_HEAD(ncsi_dev_list);
24DEFINE_SPINLOCK(ncsi_dev_lock);
25
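/* Helpers for the cached channel state: ncsi_channel_has_link() checks
 * bit 0 of the link status word cached from the last Get Link Status
 * response, and ncsi_channel_is_last() reports whether no other active
 * channel currently has link.
 */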
26bool ncsi_channel_has_link(struct ncsi_channel *channel)
27{
28 return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
29}
30
31bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
32 struct ncsi_channel *channel)
33{
34 struct ncsi_package *np;
35 struct ncsi_channel *nc;
36
37 NCSI_FOR_EACH_PACKAGE(ndp, np)
38 NCSI_FOR_EACH_CHANNEL(np, nc) {
39 if (nc == channel)
40 continue;
41 if (nc->state == NCSI_CHANNEL_ACTIVE &&
42 ncsi_channel_has_link(nc))
43 return false;
44 }
45
46 return true;
47}
48
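/* Report the aggregate link state to the consumer through nd->handler().
 * Unless forced down, the link is reported up when at least one active,
 * unqueued channel has link.
 */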
49static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
50{
51 struct ncsi_dev *nd = &ndp->ndev;
52 struct ncsi_package *np;
53 struct ncsi_channel *nc;
54 unsigned long flags;
55
56 nd->state = ncsi_dev_state_functional;
57 if (force_down) {
58 nd->link_up = 0;
59 goto report;
60 }
61
62 nd->link_up = 0;
63 NCSI_FOR_EACH_PACKAGE(ndp, np) {
64 NCSI_FOR_EACH_CHANNEL(np, nc) {
65 spin_lock_irqsave(&nc->lock, flags);
66
67 if (!list_empty(&nc->link) ||
68 nc->state != NCSI_CHANNEL_ACTIVE) {
69 spin_unlock_irqrestore(&nc->lock, flags);
70 continue;
71 }
72
73 if (ncsi_channel_has_link(nc)) {
74 spin_unlock_irqrestore(&nc->lock, flags);
75 nd->link_up = 1;
76 goto report;
77 }
78
79 spin_unlock_irqrestore(&nc->lock, flags);
80 }
81 }
82
83report:
84 nd->handler(nd);
85}
86
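/* Per-channel monitor timer. While in the START/RETRY states it sends a
 * Get Link Status (GLS) command every second; if the channel sits in the
 * WAIT window past NCSI_CHANNEL_MONITOR_WAIT_MAX without a response, it
 * is declared timed out: the link is reported down, NCSI_DEV_RESHUFFLE is
 * set and the channel is requeued for processing.
 */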
87static void ncsi_channel_monitor(struct timer_list *t)
88{
89 struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
90 struct ncsi_package *np = nc->package;
91 struct ncsi_dev_priv *ndp = np->ndp;
92 struct ncsi_channel_mode *ncm;
93 struct ncsi_cmd_arg nca;
94 bool enabled, chained;
95 unsigned int monitor_state;
96 unsigned long flags;
97 int state, ret;
98
99 spin_lock_irqsave(&nc->lock, flags);
100 state = nc->state;
101 chained = !list_empty(&nc->link);
102 enabled = nc->monitor.enabled;
103 monitor_state = nc->monitor.state;
104 spin_unlock_irqrestore(&nc->lock, flags);
105
106 if (!enabled)
107 return; /* expected race disabling timer */
108 if (WARN_ON_ONCE(chained))
109 goto bad_state;
110
111 if (state != NCSI_CHANNEL_INACTIVE &&
112 state != NCSI_CHANNEL_ACTIVE) {
113bad_state:
114 netdev_warn(ndp->ndev.dev,
115 "Bad NCSI monitor state channel %d 0x%x %s queue\n",
116 nc->id, state, chained ? "on" : "off");
117 spin_lock_irqsave(&nc->lock, flags);
118 nc->monitor.enabled = false;
119 spin_unlock_irqrestore(&nc->lock, flags);
120 return;
121 }
122
123 switch (monitor_state) {
124 case NCSI_CHANNEL_MONITOR_START:
125 case NCSI_CHANNEL_MONITOR_RETRY:
126 nca.ndp = ndp;
127 nca.package = np->id;
128 nca.channel = nc->id;
129 nca.type = NCSI_PKT_CMD_GLS;
130 nca.req_flags = 0;
131 ret = ncsi_xmit_cmd(&nca);
132 if (ret)
133 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
134 ret);
135 break;
136 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
137 break;
138 default:
139 netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
140 nc->id);
141 ncsi_report_link(ndp, true);
142 ndp->flags |= NCSI_DEV_RESHUFFLE;
143
144 ncm = &nc->modes[NCSI_MODE_LINK];
145 spin_lock_irqsave(&nc->lock, flags);
146 nc->monitor.enabled = false;
147 nc->state = NCSI_CHANNEL_INVISIBLE;
148 ncm->data[2] &= ~0x1;
149 spin_unlock_irqrestore(&nc->lock, flags);
150
151 spin_lock_irqsave(&ndp->lock, flags);
152 nc->state = NCSI_CHANNEL_ACTIVE;
153 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
154 spin_unlock_irqrestore(&ndp->lock, flags);
155 ncsi_process_next_channel(ndp);
156 return;
157 }
158
159 spin_lock_irqsave(&nc->lock, flags);
160 nc->monitor.state++;
161 spin_unlock_irqrestore(&nc->lock, flags);
162 mod_timer(&nc->monitor.timer, jiffies + HZ);
163}
164
165void ncsi_start_channel_monitor(struct ncsi_channel *nc)
166{
167 unsigned long flags;
168
169 spin_lock_irqsave(&nc->lock, flags);
170 WARN_ON_ONCE(nc->monitor.enabled);
171 nc->monitor.enabled = true;
172 nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
173 spin_unlock_irqrestore(&nc->lock, flags);
174
175 mod_timer(&nc->monitor.timer, jiffies + HZ);
176}
177
178void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
179{
180 unsigned long flags;
181
182 spin_lock_irqsave(&nc->lock, flags);
183 if (!nc->monitor.enabled) {
184 spin_unlock_irqrestore(&nc->lock, flags);
185 return;
186 }
187 nc->monitor.enabled = false;
188 spin_unlock_irqrestore(&nc->lock, flags);
189
190 del_timer_sync(&nc->monitor.timer);
191}
192
193struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
194 unsigned char id)
195{
196 struct ncsi_channel *nc;
197
198 NCSI_FOR_EACH_CHANNEL(np, nc) {
199 if (nc->id == id)
200 return nc;
201 }
202
203 return NULL;
204}
205
206struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
207{
208 struct ncsi_channel *nc, *tmp;
209 int index;
210 unsigned long flags;
211
212 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
213 if (!nc)
214 return NULL;
215
216 nc->id = id;
217 nc->package = np;
218 nc->state = NCSI_CHANNEL_INACTIVE;
219 nc->monitor.enabled = false;
220 timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
221 spin_lock_init(&nc->lock);
222 INIT_LIST_HEAD(&nc->link);
223 for (index = 0; index < NCSI_CAP_MAX; index++)
224 nc->caps[index].index = index;
225 for (index = 0; index < NCSI_MODE_MAX; index++)
226 nc->modes[index].index = index;
227
228 spin_lock_irqsave(&np->lock, flags);
229 tmp = ncsi_find_channel(np, id);
230 if (tmp) {
231 spin_unlock_irqrestore(&np->lock, flags);
232 kfree(nc);
233 return tmp;
234 }
235
236 list_add_tail_rcu(&nc->node, &np->channels);
237 np->channel_num++;
238 spin_unlock_irqrestore(&np->lock, flags);
239
240 return nc;
241}
242
243static void ncsi_remove_channel(struct ncsi_channel *nc)
244{
245 struct ncsi_package *np = nc->package;
246 unsigned long flags;
247
248 spin_lock_irqsave(&nc->lock, flags);
249
250 /* Release filters */
251 kfree(nc->mac_filter.addrs);
252 kfree(nc->vlan_filter.vids);
253
254 nc->state = NCSI_CHANNEL_INACTIVE;
255 spin_unlock_irqrestore(&nc->lock, flags);
256 ncsi_stop_channel_monitor(nc);
257
258 /* Remove and free channel */
259 spin_lock_irqsave(&np->lock, flags);
260 list_del_rcu(&nc->node);
261 np->channel_num--;
262 spin_unlock_irqrestore(&np->lock, flags);
263
264 kfree(nc);
265}
266
267struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
268 unsigned char id)
269{
270 struct ncsi_package *np;
271
272 NCSI_FOR_EACH_PACKAGE(ndp, np) {
273 if (np->id == id)
274 return np;
275 }
276
277 return NULL;
278}
279
280struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
281 unsigned char id)
282{
283 struct ncsi_package *np, *tmp;
284 unsigned long flags;
285
286 np = kzalloc(sizeof(*np), GFP_ATOMIC);
287 if (!np)
288 return NULL;
289
290 np->id = id;
291 np->ndp = ndp;
292 spin_lock_init(&np->lock);
293 INIT_LIST_HEAD(&np->channels);
294 np->channel_whitelist = UINT_MAX;
295
296 spin_lock_irqsave(&ndp->lock, flags);
297 tmp = ncsi_find_package(ndp, id);
298 if (tmp) {
299 spin_unlock_irqrestore(&ndp->lock, flags);
300 kfree(np);
301 return tmp;
302 }
303
304 list_add_tail_rcu(&np->node, &ndp->packages);
305 ndp->package_num++;
306 spin_unlock_irqrestore(&ndp->lock, flags);
307
308 return np;
309}
310
311void ncsi_remove_package(struct ncsi_package *np)
312{
313 struct ncsi_dev_priv *ndp = np->ndp;
314 struct ncsi_channel *nc, *tmp;
315 unsigned long flags;
316
317 /* Release all child channels */
318 list_for_each_entry_safe(nc, tmp, &np->channels, node)
319 ncsi_remove_channel(nc);
320
321 /* Remove and free package */
322 spin_lock_irqsave(&ndp->lock, flags);
323 list_del_rcu(&np->node);
324 ndp->package_num--;
325 spin_unlock_irqrestore(&ndp->lock, flags);
326
327 kfree(np);
328}
329
330void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
331 unsigned char id,
332 struct ncsi_package **np,
333 struct ncsi_channel **nc)
334{
335 struct ncsi_package *p;
336 struct ncsi_channel *c;
337
338 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
339 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
340
341 if (np)
342 *np = p;
343 if (nc)
344 *nc = c;
345}
346
347/* Two consecutive NCSI commands must not use the same packet ID;
348 * otherwise a stale response could be matched to the wrong command.
349 * The available IDs are therefore allocated in round-robin fashion.
350 */
351struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
352 unsigned int req_flags)
353{
354 struct ncsi_request *nr = NULL;
355 int i, limit = ARRAY_SIZE(ndp->requests);
356 unsigned long flags;
357
358	/* Check if there is an available request up to the ceiling */
359 spin_lock_irqsave(&ndp->lock, flags);
360 for (i = ndp->request_id; i < limit; i++) {
361 if (ndp->requests[i].used)
362 continue;
363
364 nr = &ndp->requests[i];
365 nr->used = true;
366 nr->flags = req_flags;
367 ndp->request_id = i + 1;
368 goto found;
369 }
370
371	/* Fall back to checking from the starting cursor */
372 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
373 if (ndp->requests[i].used)
374 continue;
375
376 nr = &ndp->requests[i];
377 nr->used = true;
378 nr->flags = req_flags;
379 ndp->request_id = i + 1;
380 goto found;
381 }
382
383found:
384 spin_unlock_irqrestore(&ndp->lock, flags);
385 return nr;
386}
387
388void ncsi_free_request(struct ncsi_request *nr)
389{
390 struct ncsi_dev_priv *ndp = nr->ndp;
391 struct sk_buff *cmd, *rsp;
392 unsigned long flags;
393 bool driven;
394
395 if (nr->enabled) {
396 nr->enabled = false;
397 del_timer_sync(&nr->timer);
398 }
399
400 spin_lock_irqsave(&ndp->lock, flags);
401 cmd = nr->cmd;
402 rsp = nr->rsp;
403 nr->cmd = NULL;
404 nr->rsp = NULL;
405 nr->used = false;
406 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
407 spin_unlock_irqrestore(&ndp->lock, flags);
408
409 if (driven && cmd && --ndp->pending_req_num == 0)
410 schedule_work(&ndp->work);
411
412 /* Release command and response */
413 consume_skb(cmd);
414 consume_skb(rsp);
415}
416
417struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
418{
419 struct ncsi_dev_priv *ndp;
420
421 NCSI_FOR_EACH_DEV(ndp) {
422 if (ndp->ndev.dev == dev)
423 return &ndp->ndev;
424 }
425
426 return NULL;
427}
428
429static void ncsi_request_timeout(struct timer_list *t)
430{
431 struct ncsi_request *nr = from_timer(nr, t, timer);
432 struct ncsi_dev_priv *ndp = nr->ndp;
433 struct ncsi_cmd_pkt *cmd;
434 struct ncsi_package *np;
435 struct ncsi_channel *nc;
436 unsigned long flags;
437
438	/* If the request already has an associated response,
439	 * let the response handler release it.
440 */
441 spin_lock_irqsave(&ndp->lock, flags);
442 nr->enabled = false;
443 if (nr->rsp || !nr->cmd) {
444 spin_unlock_irqrestore(&ndp->lock, flags);
445 return;
446 }
447 spin_unlock_irqrestore(&ndp->lock, flags);
448
449 if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
450 if (nr->cmd) {
451 /* Find the package */
452 cmd = (struct ncsi_cmd_pkt *)
453 skb_network_header(nr->cmd);
454 ncsi_find_package_and_channel(ndp,
455 cmd->cmd.common.channel,
456 &np, &nc);
457 ncsi_send_netlink_timeout(nr, np, nc);
458 }
459 }
460
461 /* Release the request */
462 ncsi_free_request(nr);
463}
464
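/* Suspend state machine for the active channel: select its package (SP),
 * optionally refresh link states (GLS) when a reshuffle is pending, then
 * disable the channel's network Tx (DCNT), disable the channel (DC) and
 * deselect the package (DP) unless another channel on it is still active.
 */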
465static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
466{
467 struct ncsi_dev *nd = &ndp->ndev;
468 struct ncsi_package *np;
469 struct ncsi_channel *nc, *tmp;
470 struct ncsi_cmd_arg nca;
471 unsigned long flags;
472 int ret;
473
474 np = ndp->active_package;
475 nc = ndp->active_channel;
476 nca.ndp = ndp;
477 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
478 switch (nd->state) {
479 case ncsi_dev_state_suspend:
480 nd->state = ncsi_dev_state_suspend_select;
481 /* Fall through */
482 case ncsi_dev_state_suspend_select:
483 ndp->pending_req_num = 1;
484
485 nca.type = NCSI_PKT_CMD_SP;
486 nca.package = np->id;
487 nca.channel = NCSI_RESERVED_CHANNEL;
488 if (ndp->flags & NCSI_DEV_HWA)
489 nca.bytes[0] = 0;
490 else
491 nca.bytes[0] = 1;
492
493		/* Retrieve the latest link states of the channels in the current
494		 * package when the active channel needs to fail over to another
495		 * one. That means we may select a different channel as the next
496		 * active one, and the channels' link states are the most important
497		 * factor in that selection, so we need them to be accurate.
498		 * Unfortunately, the link states of inactive channels can't be
499		 * updated by LSC AENs in time.
500		 */
501 if (ndp->flags & NCSI_DEV_RESHUFFLE)
502 nd->state = ncsi_dev_state_suspend_gls;
503 else
504 nd->state = ncsi_dev_state_suspend_dcnt;
505 ret = ncsi_xmit_cmd(&nca);
506 if (ret)
507 goto error;
508
509 break;
510 case ncsi_dev_state_suspend_gls:
511 ndp->pending_req_num = np->channel_num;
512
513 nca.type = NCSI_PKT_CMD_GLS;
514 nca.package = np->id;
515
516 nd->state = ncsi_dev_state_suspend_dcnt;
517 NCSI_FOR_EACH_CHANNEL(np, nc) {
518 nca.channel = nc->id;
519 ret = ncsi_xmit_cmd(&nca);
520 if (ret)
521 goto error;
522 }
523
524 break;
525 case ncsi_dev_state_suspend_dcnt:
526 ndp->pending_req_num = 1;
527
528 nca.type = NCSI_PKT_CMD_DCNT;
529 nca.package = np->id;
530 nca.channel = nc->id;
531
532 nd->state = ncsi_dev_state_suspend_dc;
533 ret = ncsi_xmit_cmd(&nca);
534 if (ret)
535 goto error;
536
537 break;
538 case ncsi_dev_state_suspend_dc:
539 ndp->pending_req_num = 1;
540
541 nca.type = NCSI_PKT_CMD_DC;
542 nca.package = np->id;
543 nca.channel = nc->id;
544 nca.bytes[0] = 1;
545
546 nd->state = ncsi_dev_state_suspend_deselect;
547 ret = ncsi_xmit_cmd(&nca);
548 if (ret)
549 goto error;
550
551 NCSI_FOR_EACH_CHANNEL(np, tmp) {
552 /* If there is another channel active on this package
553 * do not deselect the package.
554 */
555 if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
556 nd->state = ncsi_dev_state_suspend_done;
557 break;
558 }
559 }
560 break;
561 case ncsi_dev_state_suspend_deselect:
562 ndp->pending_req_num = 1;
563
564 nca.type = NCSI_PKT_CMD_DP;
565 nca.package = np->id;
566 nca.channel = NCSI_RESERVED_CHANNEL;
567
568 nd->state = ncsi_dev_state_suspend_done;
569 ret = ncsi_xmit_cmd(&nca);
570 if (ret)
571 goto error;
572
573 break;
574 case ncsi_dev_state_suspend_done:
575 spin_lock_irqsave(&nc->lock, flags);
576 nc->state = NCSI_CHANNEL_INACTIVE;
577 spin_unlock_irqrestore(&nc->lock, flags);
578 if (ndp->flags & NCSI_DEV_RESET)
579 ncsi_reset_dev(nd);
580 else
581 ncsi_process_next_channel(ndp);
582 break;
583 default:
584 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
585 nd->state);
586 }
587
588 return;
589error:
590 nd->state = ncsi_dev_state_functional;
591}
592
593/* Check the VLAN filter bitmap for a set filter, and construct a
594 * "Set VLAN Filter - Disable" packet if found.
595 */
596static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
597 struct ncsi_cmd_arg *nca)
598{
599 struct ncsi_channel_vlan_filter *ncf;
600 unsigned long flags;
601 void *bitmap;
602 int index;
603 u16 vid;
604
605 ncf = &nc->vlan_filter;
606 bitmap = &ncf->bitmap;
607
608 spin_lock_irqsave(&nc->lock, flags);
609 index = find_next_bit(bitmap, ncf->n_vids, 0);
610 if (index >= ncf->n_vids) {
611 spin_unlock_irqrestore(&nc->lock, flags);
612 return -1;
613 }
614 vid = ncf->vids[index];
615
616 clear_bit(index, bitmap);
617 ncf->vids[index] = 0;
618 spin_unlock_irqrestore(&nc->lock, flags);
619
620 nca->type = NCSI_PKT_CMD_SVF;
621 nca->words[1] = vid;
622 /* HW filter index starts at 1 */
623 nca->bytes[6] = index + 1;
624 nca->bytes[7] = 0x00;
625 return 0;
626}
627
628/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
629 * packet.
630 */
631static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
632 struct ncsi_cmd_arg *nca)
633{
634 struct ncsi_channel_vlan_filter *ncf;
635 struct vlan_vid *vlan = NULL;
636 unsigned long flags;
637 int i, index;
638 void *bitmap;
639 u16 vid;
640
641 if (list_empty(&ndp->vlan_vids))
642 return -1;
643
644 ncf = &nc->vlan_filter;
645 bitmap = &ncf->bitmap;
646
647 spin_lock_irqsave(&nc->lock, flags);
648
649 rcu_read_lock();
650 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
651 vid = vlan->vid;
652 for (i = 0; i < ncf->n_vids; i++)
653 if (ncf->vids[i] == vid) {
654 vid = 0;
655 break;
656 }
657 if (vid)
658 break;
659 }
660 rcu_read_unlock();
661
662 if (!vid) {
663		/* All VLAN IDs in the list are already set in the filter */
664 spin_unlock_irqrestore(&nc->lock, flags);
665 return -1;
666 }
667
668 index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
669 if (index < 0 || index >= ncf->n_vids) {
670 netdev_err(ndp->ndev.dev,
671 "Channel %u already has all VLAN filters set\n",
672 nc->id);
673 spin_unlock_irqrestore(&nc->lock, flags);
674 return -1;
675 }
676
677 ncf->vids[index] = vid;
678 set_bit(index, bitmap);
679 spin_unlock_irqrestore(&nc->lock, flags);
680
681 nca->type = NCSI_PKT_CMD_SVF;
682 nca->words[1] = vid;
683 /* HW filter index starts at 1 */
684 nca->bytes[6] = index + 1;
685 nca->bytes[7] = 0x01;
686
687 return 0;
688}
689
690#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
691
692/* NCSI OEM Command APIs */
693static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
694{
695 unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
696 int ret = 0;
697
698 nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
699
700 memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
701 *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
702 data[5] = NCSI_OEM_BCM_CMD_GMA;
703
704 nca->data = data;
705
706 ret = ncsi_xmit_cmd(nca);
707 if (ret)
708 netdev_err(nca->ndp->ndev.dev,
709 "NCSI: Failed to transmit cmd 0x%x during configure\n",
710 nca->type);
711 return ret;
712}
713
714static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
715{
716 union {
717 u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
718 u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
719 } u;
720 int ret = 0;
721
722 nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
723
724 memset(&u, 0, sizeof(u));
725 u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
726 u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
727 u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
728
729 nca->data = u.data_u8;
730
731 ret = ncsi_xmit_cmd(nca);
732 if (ret)
733 netdev_err(nca->ndp->ndev.dev,
734 "NCSI: Failed to transmit cmd 0x%x during configure\n",
735 nca->type);
736 return ret;
737}
738
739/* OEM Command handlers initialization */
740static struct ncsi_oem_gma_handler {
741 unsigned int mfr_id;
742 int (*handler)(struct ncsi_cmd_arg *nca);
743} ncsi_oem_gma_handlers[] = {
744 { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
745 { NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
746};
747
748static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
749{
750 struct ncsi_oem_gma_handler *nch = NULL;
751 int i;
752
753 /* This function should only be called once, return if flag set */
754 if (nca->ndp->gma_flag == 1)
755 return -1;
756
757 /* Find gma handler for given manufacturer id */
758 for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
759 if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
760 if (ncsi_oem_gma_handlers[i].handler)
761 nch = &ncsi_oem_gma_handlers[i];
762 break;
763 }
764 }
765
766 if (!nch) {
767 netdev_err(nca->ndp->ndev.dev,
768 "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
769 mf_id);
770 return -1;
771 }
772
773	/* Get MAC address from the NCSI device */
774 return nch->handler(nca);
775}
776
777#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
778
779/* Determine if a given channel from the channel_queue should be used for Tx */
780static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
781 struct ncsi_channel *nc)
782{
783 struct ncsi_channel_mode *ncm;
784 struct ncsi_channel *channel;
785 struct ncsi_package *np;
786
787 /* Check if any other channel has Tx enabled; a channel may have already
788 * been configured and removed from the channel queue.
789 */
790 NCSI_FOR_EACH_PACKAGE(ndp, np) {
791 if (!ndp->multi_package && np != nc->package)
792 continue;
793 NCSI_FOR_EACH_CHANNEL(np, channel) {
794 ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
795 if (ncm->enable)
796 return false;
797 }
798 }
799
800 /* This channel is the preferred channel and has link */
801 list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
802 np = channel->package;
803 if (np->preferred_channel &&
804 ncsi_channel_has_link(np->preferred_channel)) {
805 return np->preferred_channel == nc;
806 }
807 }
808
809 /* This channel has link */
810 if (ncsi_channel_has_link(nc))
811 return true;
812
813 list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
814 if (ncsi_channel_has_link(channel))
815 return false;
816
817 /* No other channel has link; default to this one */
818 return true;
819}
820
821/* Change the active Tx channel in a multi-channel setup */
822int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
823 struct ncsi_package *package,
824 struct ncsi_channel *disable,
825 struct ncsi_channel *enable)
826{
827 struct ncsi_cmd_arg nca;
828 struct ncsi_channel *nc;
829 struct ncsi_package *np;
830 int ret = 0;
831
832 if (!package->multi_channel && !ndp->multi_package)
833 netdev_warn(ndp->ndev.dev,
834 "NCSI: Trying to update Tx channel in single-channel mode\n");
835 nca.ndp = ndp;
836 nca.req_flags = 0;
837
838 /* Find current channel with Tx enabled */
839 NCSI_FOR_EACH_PACKAGE(ndp, np) {
840 if (disable)
841 break;
842 if (!ndp->multi_package && np != package)
843 continue;
844
845 NCSI_FOR_EACH_CHANNEL(np, nc)
846 if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
847 disable = nc;
848 break;
849 }
850 }
851
852 /* Find a suitable channel for Tx */
853 NCSI_FOR_EACH_PACKAGE(ndp, np) {
854 if (enable)
855 break;
856 if (!ndp->multi_package && np != package)
857 continue;
858 if (!(ndp->package_whitelist & (0x1 << np->id)))
859 continue;
860
861 if (np->preferred_channel &&
862 ncsi_channel_has_link(np->preferred_channel)) {
863 enable = np->preferred_channel;
864 break;
865 }
866
867 NCSI_FOR_EACH_CHANNEL(np, nc) {
868 if (!(np->channel_whitelist & 0x1 << nc->id))
869 continue;
870 if (nc->state != NCSI_CHANNEL_ACTIVE)
871 continue;
872 if (ncsi_channel_has_link(nc)) {
873 enable = nc;
874 break;
875 }
876 }
877 }
878
879 if (disable == enable)
880 return -1;
881
882 if (!enable)
883 return -1;
884
885 if (disable) {
886 nca.channel = disable->id;
887 nca.package = disable->package->id;
888 nca.type = NCSI_PKT_CMD_DCNT;
889 ret = ncsi_xmit_cmd(&nca);
890 if (ret)
891 netdev_err(ndp->ndev.dev,
892 "Error %d sending DCNT\n",
893 ret);
894 }
895
896 netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
897
898 nca.channel = enable->id;
899 nca.package = enable->package->id;
900 nca.type = NCSI_PKT_CMD_ECNT;
901 ret = ncsi_xmit_cmd(&nca);
902 if (ret)
903 netdev_err(ndp->ndev.dev,
904 "Error %d sending ECNT\n",
905 ret);
906
907 return ret;
908}
909
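/* Configuration state machine for the channel being brought up: select
 * the package (SP), clear the channel's initial state (CIS), optionally
 * fetch the MAC address via an OEM command, program the VLAN and MAC
 * filters, set up broadcast/multicast handling, enable channel Tx when
 * appropriate, enable the channel and AENs, and finish with a Get Link
 * Status (GLS) command.
 */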
910static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
911{
912 struct ncsi_package *np = ndp->active_package;
913 struct ncsi_channel *nc = ndp->active_channel;
914 struct ncsi_channel *hot_nc = NULL;
915 struct ncsi_dev *nd = &ndp->ndev;
916 struct net_device *dev = nd->dev;
917 struct ncsi_cmd_arg nca;
918 unsigned char index;
919 unsigned long flags;
920 int ret;
921
922 nca.ndp = ndp;
923 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
924 switch (nd->state) {
925 case ncsi_dev_state_config:
926 case ncsi_dev_state_config_sp:
927 ndp->pending_req_num = 1;
928
929 /* Select the specific package */
930 nca.type = NCSI_PKT_CMD_SP;
931 if (ndp->flags & NCSI_DEV_HWA)
932 nca.bytes[0] = 0;
933 else
934 nca.bytes[0] = 1;
935 nca.package = np->id;
936 nca.channel = NCSI_RESERVED_CHANNEL;
937 ret = ncsi_xmit_cmd(&nca);
938 if (ret) {
939 netdev_err(ndp->ndev.dev,
940 "NCSI: Failed to transmit CMD_SP\n");
941 goto error;
942 }
943
944 nd->state = ncsi_dev_state_config_cis;
945 break;
946 case ncsi_dev_state_config_cis:
947 ndp->pending_req_num = 1;
948
949 /* Clear initial state */
950 nca.type = NCSI_PKT_CMD_CIS;
951 nca.package = np->id;
952 nca.channel = nc->id;
953 ret = ncsi_xmit_cmd(&nca);
954 if (ret) {
955 netdev_err(ndp->ndev.dev,
956 "NCSI: Failed to transmit CMD_CIS\n");
957 goto error;
958 }
959
960 nd->state = ncsi_dev_state_config_oem_gma;
961 break;
962 case ncsi_dev_state_config_oem_gma:
963 nd->state = ncsi_dev_state_config_clear_vids;
964 ret = -1;
965
966#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
967 nca.type = NCSI_PKT_CMD_OEM;
968 nca.package = np->id;
969 nca.channel = nc->id;
970 ndp->pending_req_num = 1;
971 ret = ncsi_gma_handler(&nca, nc->version.mf_id);
972#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
973
974 if (ret < 0)
975 schedule_work(&ndp->work);
976
977 break;
978 case ncsi_dev_state_config_clear_vids:
979 case ncsi_dev_state_config_svf:
980 case ncsi_dev_state_config_ev:
981 case ncsi_dev_state_config_sma:
982 case ncsi_dev_state_config_ebf:
983 case ncsi_dev_state_config_dgmf:
984 case ncsi_dev_state_config_ecnt:
985 case ncsi_dev_state_config_ec:
986 case ncsi_dev_state_config_ae:
987 case ncsi_dev_state_config_gls:
988 ndp->pending_req_num = 1;
989
990 nca.package = np->id;
991 nca.channel = nc->id;
992
993 /* Clear any active filters on the channel before setting */
994 if (nd->state == ncsi_dev_state_config_clear_vids) {
995 ret = clear_one_vid(ndp, nc, &nca);
996 if (ret) {
997 nd->state = ncsi_dev_state_config_svf;
998 schedule_work(&ndp->work);
999 break;
1000 }
1001 /* Repeat */
1002 nd->state = ncsi_dev_state_config_clear_vids;
1003 /* Add known VLAN tags to the filter */
1004 } else if (nd->state == ncsi_dev_state_config_svf) {
1005 ret = set_one_vid(ndp, nc, &nca);
1006 if (ret) {
1007 nd->state = ncsi_dev_state_config_ev;
1008 schedule_work(&ndp->work);
1009 break;
1010 }
1011 /* Repeat */
1012 nd->state = ncsi_dev_state_config_svf;
1013 /* Enable/Disable the VLAN filter */
1014 } else if (nd->state == ncsi_dev_state_config_ev) {
1015 if (list_empty(&ndp->vlan_vids)) {
1016 nca.type = NCSI_PKT_CMD_DV;
1017 } else {
1018 nca.type = NCSI_PKT_CMD_EV;
1019 nca.bytes[3] = NCSI_CAP_VLAN_NO;
1020 }
1021 nd->state = ncsi_dev_state_config_sma;
1022 } else if (nd->state == ncsi_dev_state_config_sma) {
1023 /* Use first entry in unicast filter table. Note that
1024 * the MAC filter table starts from entry 1 instead of
1025 * 0.
1026 */
1027 nca.type = NCSI_PKT_CMD_SMA;
1028 for (index = 0; index < 6; index++)
1029 nca.bytes[index] = dev->dev_addr[index];
1030 nca.bytes[6] = 0x1;
1031 nca.bytes[7] = 0x1;
1032 nd->state = ncsi_dev_state_config_ebf;
1033 } else if (nd->state == ncsi_dev_state_config_ebf) {
1034 nca.type = NCSI_PKT_CMD_EBF;
1035 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1036			/* If global multicast filtering is supported, disable it
1037			 * so that all multicast packets will be forwarded to the
1038			 * management controller.
1039 */
1040 if (nc->caps[NCSI_CAP_GENERIC].cap &
1041 NCSI_CAP_GENERIC_MC)
1042 nd->state = ncsi_dev_state_config_dgmf;
1043 else if (ncsi_channel_is_tx(ndp, nc))
1044 nd->state = ncsi_dev_state_config_ecnt;
1045 else
1046 nd->state = ncsi_dev_state_config_ec;
1047 } else if (nd->state == ncsi_dev_state_config_dgmf) {
1048 nca.type = NCSI_PKT_CMD_DGMF;
1049 if (ncsi_channel_is_tx(ndp, nc))
1050 nd->state = ncsi_dev_state_config_ecnt;
1051 else
1052 nd->state = ncsi_dev_state_config_ec;
1053 } else if (nd->state == ncsi_dev_state_config_ecnt) {
1054 if (np->preferred_channel &&
1055 nc != np->preferred_channel)
1056 netdev_info(ndp->ndev.dev,
1057 "NCSI: Tx failed over to channel %u\n",
1058 nc->id);
1059 nca.type = NCSI_PKT_CMD_ECNT;
1060 nd->state = ncsi_dev_state_config_ec;
1061 } else if (nd->state == ncsi_dev_state_config_ec) {
1062 /* Enable AEN if it's supported */
1063 nca.type = NCSI_PKT_CMD_EC;
1064 nd->state = ncsi_dev_state_config_ae;
1065 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1066 nd->state = ncsi_dev_state_config_gls;
1067 } else if (nd->state == ncsi_dev_state_config_ae) {
1068 nca.type = NCSI_PKT_CMD_AE;
1069 nca.bytes[0] = 0;
1070 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1071 nd->state = ncsi_dev_state_config_gls;
1072 } else if (nd->state == ncsi_dev_state_config_gls) {
1073 nca.type = NCSI_PKT_CMD_GLS;
1074 nd->state = ncsi_dev_state_config_done;
1075 }
1076
1077 ret = ncsi_xmit_cmd(&nca);
1078 if (ret) {
1079 netdev_err(ndp->ndev.dev,
1080 "NCSI: Failed to transmit CMD %x\n",
1081 nca.type);
1082 goto error;
1083 }
1084 break;
1085 case ncsi_dev_state_config_done:
1086 netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1087 nc->id);
1088 spin_lock_irqsave(&nc->lock, flags);
1089 nc->state = NCSI_CHANNEL_ACTIVE;
1090
1091 if (ndp->flags & NCSI_DEV_RESET) {
1092 /* A reset event happened during config, start it now */
1093 nc->reconfigure_needed = false;
1094 spin_unlock_irqrestore(&nc->lock, flags);
1095 ncsi_reset_dev(nd);
1096 break;
1097 }
1098
1099 if (nc->reconfigure_needed) {
1100 /* This channel's configuration has been updated
1101 * part-way during the config state - start the
1102 * channel configuration over
1103 */
1104 nc->reconfigure_needed = false;
1105 nc->state = NCSI_CHANNEL_INACTIVE;
1106 spin_unlock_irqrestore(&nc->lock, flags);
1107
1108 spin_lock_irqsave(&ndp->lock, flags);
1109 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1110 spin_unlock_irqrestore(&ndp->lock, flags);
1111
1112 netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1113 ncsi_process_next_channel(ndp);
1114 break;
1115 }
1116
1117 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1118 hot_nc = nc;
1119 } else {
1120 hot_nc = NULL;
1121 netdev_dbg(ndp->ndev.dev,
1122 "NCSI: channel %u link down after config\n",
1123 nc->id);
1124 }
1125 spin_unlock_irqrestore(&nc->lock, flags);
1126
1127 /* Update the hot channel */
1128 spin_lock_irqsave(&ndp->lock, flags);
1129 ndp->hot_channel = hot_nc;
1130 spin_unlock_irqrestore(&ndp->lock, flags);
1131
1132 ncsi_start_channel_monitor(nc);
1133 ncsi_process_next_channel(ndp);
1134 break;
1135 default:
1136 netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1137 nd->state);
1138 }
1139
1140 return;
1141
1142error:
1143 ncsi_report_link(ndp, true);
1144}
1145
1146static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1147{
1148 struct ncsi_channel *nc, *found, *hot_nc;
1149 struct ncsi_channel_mode *ncm;
1150 unsigned long flags, cflags;
1151 struct ncsi_package *np;
1152 bool with_link;
1153
1154 spin_lock_irqsave(&ndp->lock, flags);
1155 hot_nc = ndp->hot_channel;
1156 spin_unlock_irqrestore(&ndp->lock, flags);
1157
1158	/* By default the search stops once an inactive channel with link
1159	 * up is found, unless a preferred channel is set.
1160 * If multi_package or multi_channel are configured all channels in the
1161 * whitelist are added to the channel queue.
1162 */
1163 found = NULL;
1164 with_link = false;
1165 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1166 if (!(ndp->package_whitelist & (0x1 << np->id)))
1167 continue;
1168 NCSI_FOR_EACH_CHANNEL(np, nc) {
1169 if (!(np->channel_whitelist & (0x1 << nc->id)))
1170 continue;
1171
1172 spin_lock_irqsave(&nc->lock, cflags);
1173
1174 if (!list_empty(&nc->link) ||
1175 nc->state != NCSI_CHANNEL_INACTIVE) {
1176 spin_unlock_irqrestore(&nc->lock, cflags);
1177 continue;
1178 }
1179
1180 if (!found)
1181 found = nc;
1182
1183 if (nc == hot_nc)
1184 found = nc;
1185
1186 ncm = &nc->modes[NCSI_MODE_LINK];
1187 if (ncm->data[2] & 0x1) {
1188 found = nc;
1189 with_link = true;
1190 }
1191
1192 /* If multi_channel is enabled configure all valid
1193 * channels whether or not they currently have link
1194 * so they will have AENs enabled.
1195 */
1196 if (with_link || np->multi_channel) {
1197 spin_lock_irqsave(&ndp->lock, flags);
1198 list_add_tail_rcu(&nc->link,
1199 &ndp->channel_queue);
1200 spin_unlock_irqrestore(&ndp->lock, flags);
1201
1202 netdev_dbg(ndp->ndev.dev,
1203 "NCSI: Channel %u added to queue (link %s)\n",
1204 nc->id,
1205 ncm->data[2] & 0x1 ? "up" : "down");
1206 }
1207
1208 spin_unlock_irqrestore(&nc->lock, cflags);
1209
1210 if (with_link && !np->multi_channel)
1211 break;
1212 }
1213 if (with_link && !ndp->multi_package)
1214 break;
1215 }
1216
1217 if (list_empty(&ndp->channel_queue) && found) {
1218 netdev_info(ndp->ndev.dev,
1219 "NCSI: No channel with link found, configuring channel %u\n",
1220 found->id);
1221 spin_lock_irqsave(&ndp->lock, flags);
1222 list_add_tail_rcu(&found->link, &ndp->channel_queue);
1223 spin_unlock_irqrestore(&ndp->lock, flags);
1224 } else if (!found) {
1225 netdev_warn(ndp->ndev.dev,
1226 "NCSI: No channel found to configure!\n");
1227 ncsi_report_link(ndp, true);
1228 return -ENODEV;
1229 }
1230
1231 return ncsi_process_next_channel(ndp);
1232}
1233
1234static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1235{
1236 struct ncsi_package *np;
1237 struct ncsi_channel *nc;
1238 unsigned int cap;
1239 bool has_channel = false;
1240
1241	/* Hardware arbitration is disabled if any one channel doesn't
1242	 * explicitly support it.
1243 */
1244 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1245 NCSI_FOR_EACH_CHANNEL(np, nc) {
1246 has_channel = true;
1247
1248 cap = nc->caps[NCSI_CAP_GENERIC].cap;
1249 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1250 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1251 NCSI_CAP_GENERIC_HWA_SUPPORT) {
1252 ndp->flags &= ~NCSI_DEV_HWA;
1253 return false;
1254 }
1255 }
1256 }
1257
1258 if (has_channel) {
1259 ndp->flags |= NCSI_DEV_HWA;
1260 return true;
1261 }
1262
1263 ndp->flags &= ~NCSI_DEV_HWA;
1264 return false;
1265}
1266
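/* Probe state machine: deselect all eight possible packages (DP), then
 * for each package in turn select it (SP), clear initial state on every
 * channel (CIS), query version, capabilities and link status (GVI/GC/GLS)
 * and deselect it again (DP) before moving on to the next package.
 */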
1267static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1268{
1269 struct ncsi_dev *nd = &ndp->ndev;
1270 struct ncsi_package *np;
1271 struct ncsi_channel *nc;
1272 struct ncsi_cmd_arg nca;
1273 unsigned char index;
1274 int ret;
1275
1276 nca.ndp = ndp;
1277 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1278 switch (nd->state) {
1279 case ncsi_dev_state_probe:
1280 nd->state = ncsi_dev_state_probe_deselect;
1281 /* Fall through */
1282 case ncsi_dev_state_probe_deselect:
1283 ndp->pending_req_num = 8;
1284
1285 /* Deselect all possible packages */
1286 nca.type = NCSI_PKT_CMD_DP;
1287 nca.channel = NCSI_RESERVED_CHANNEL;
1288 for (index = 0; index < 8; index++) {
1289 nca.package = index;
1290 ret = ncsi_xmit_cmd(&nca);
1291 if (ret)
1292 goto error;
1293 }
1294
1295 nd->state = ncsi_dev_state_probe_package;
1296 break;
1297 case ncsi_dev_state_probe_package:
1298 ndp->pending_req_num = 1;
1299
1300 nca.type = NCSI_PKT_CMD_SP;
1301 nca.bytes[0] = 1;
1302 nca.package = ndp->package_probe_id;
1303 nca.channel = NCSI_RESERVED_CHANNEL;
1304 ret = ncsi_xmit_cmd(&nca);
1305 if (ret)
1306 goto error;
1307 nd->state = ncsi_dev_state_probe_channel;
1308 break;
1309 case ncsi_dev_state_probe_channel:
1310 ndp->active_package = ncsi_find_package(ndp,
1311 ndp->package_probe_id);
1312 if (!ndp->active_package) {
1313 /* No response */
1314 nd->state = ncsi_dev_state_probe_dp;
1315 schedule_work(&ndp->work);
1316 break;
1317 }
1318 nd->state = ncsi_dev_state_probe_cis;
1319 schedule_work(&ndp->work);
1320 break;
1321 case ncsi_dev_state_probe_cis:
1322 ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1323
1324 /* Clear initial state */
1325 nca.type = NCSI_PKT_CMD_CIS;
1326 nca.package = ndp->active_package->id;
1327 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1328 nca.channel = index;
1329 ret = ncsi_xmit_cmd(&nca);
1330 if (ret)
1331 goto error;
1332 }
1333
1334 nd->state = ncsi_dev_state_probe_gvi;
1335 break;
1336 case ncsi_dev_state_probe_gvi:
1337 case ncsi_dev_state_probe_gc:
1338 case ncsi_dev_state_probe_gls:
1339 np = ndp->active_package;
1340 ndp->pending_req_num = np->channel_num;
1341
1342 /* Retrieve version, capability or link status */
1343 if (nd->state == ncsi_dev_state_probe_gvi)
1344 nca.type = NCSI_PKT_CMD_GVI;
1345 else if (nd->state == ncsi_dev_state_probe_gc)
1346 nca.type = NCSI_PKT_CMD_GC;
1347 else
1348 nca.type = NCSI_PKT_CMD_GLS;
1349
1350 nca.package = np->id;
1351 NCSI_FOR_EACH_CHANNEL(np, nc) {
1352 nca.channel = nc->id;
1353 ret = ncsi_xmit_cmd(&nca);
1354 if (ret)
1355 goto error;
1356 }
1357
1358 if (nd->state == ncsi_dev_state_probe_gvi)
1359 nd->state = ncsi_dev_state_probe_gc;
1360 else if (nd->state == ncsi_dev_state_probe_gc)
1361 nd->state = ncsi_dev_state_probe_gls;
1362 else
1363 nd->state = ncsi_dev_state_probe_dp;
1364 break;
1365 case ncsi_dev_state_probe_dp:
1366 ndp->pending_req_num = 1;
1367
1368 /* Deselect the current package */
1369 nca.type = NCSI_PKT_CMD_DP;
1370 nca.package = ndp->package_probe_id;
1371 nca.channel = NCSI_RESERVED_CHANNEL;
1372 ret = ncsi_xmit_cmd(&nca);
1373 if (ret)
1374 goto error;
1375
1376 /* Probe next package */
1377 ndp->package_probe_id++;
1378 if (ndp->package_probe_id >= 8) {
1379 /* Probe finished */
1380 ndp->flags |= NCSI_DEV_PROBED;
1381 break;
1382 }
1383 nd->state = ncsi_dev_state_probe_package;
1384 ndp->active_package = NULL;
1385 break;
1386 default:
1387 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1388 nd->state);
1389 }
1390
1391 if (ndp->flags & NCSI_DEV_PROBED) {
1392 /* Check if all packages have HWA support */
1393 ncsi_check_hwa(ndp);
1394 ncsi_choose_active_channel(ndp);
1395 }
1396
1397 return;
1398error:
1399 netdev_err(ndp->ndev.dev,
1400 "NCSI: Failed to transmit cmd 0x%x during probe\n",
1401 nca.type);
1402 ncsi_report_link(ndp, true);
1403}
1404
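/* Workqueue handler: dispatch to the probe, suspend or configure state
 * machine according to the major device state.
 */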
1405static void ncsi_dev_work(struct work_struct *work)
1406{
1407 struct ncsi_dev_priv *ndp = container_of(work,
1408 struct ncsi_dev_priv, work);
1409 struct ncsi_dev *nd = &ndp->ndev;
1410
1411 switch (nd->state & ncsi_dev_state_major) {
1412 case ncsi_dev_state_probe:
1413 ncsi_probe_channel(ndp);
1414 break;
1415 case ncsi_dev_state_suspend:
1416 ncsi_suspend_channel(ndp);
1417 break;
1418 case ncsi_dev_state_config:
1419 ncsi_configure_channel(ndp);
1420 break;
1421 default:
1422 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1423 nd->state);
1424 }
1425}
1426
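/* Take the next channel off the channel queue and act on its previous
 * state: an inactive channel gets configured, an active one gets
 * suspended. With an empty queue, either reshuffle (re-select channels)
 * or report the final link state.
 */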
1427int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1428{
1429 struct ncsi_channel *nc;
1430 int old_state;
1431 unsigned long flags;
1432
1433 spin_lock_irqsave(&ndp->lock, flags);
1434 nc = list_first_or_null_rcu(&ndp->channel_queue,
1435 struct ncsi_channel, link);
1436 if (!nc) {
1437 spin_unlock_irqrestore(&ndp->lock, flags);
1438 goto out;
1439 }
1440
1441 list_del_init(&nc->link);
1442 spin_unlock_irqrestore(&ndp->lock, flags);
1443
1444 spin_lock_irqsave(&nc->lock, flags);
1445 old_state = nc->state;
1446 nc->state = NCSI_CHANNEL_INVISIBLE;
1447 spin_unlock_irqrestore(&nc->lock, flags);
1448
1449 ndp->active_channel = nc;
1450 ndp->active_package = nc->package;
1451
1452 switch (old_state) {
1453 case NCSI_CHANNEL_INACTIVE:
1454 ndp->ndev.state = ncsi_dev_state_config;
1455 netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1456 nc->id);
1457 ncsi_configure_channel(ndp);
1458 break;
1459 case NCSI_CHANNEL_ACTIVE:
1460 ndp->ndev.state = ncsi_dev_state_suspend;
1461 netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1462 nc->id);
1463 ncsi_suspend_channel(ndp);
1464 break;
1465 default:
1466 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1467 old_state, nc->package->id, nc->id);
1468 ncsi_report_link(ndp, false);
1469 return -EINVAL;
1470 }
1471
1472 return 0;
1473
1474out:
1475 ndp->active_channel = NULL;
1476 ndp->active_package = NULL;
1477 if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1478 ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1479 return ncsi_choose_active_channel(ndp);
1480 }
1481
1482 ncsi_report_link(ndp, false);
1483 return -ENODEV;
1484}
1485
1486static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1487{
1488 struct ncsi_dev *nd = &ndp->ndev;
1489 struct ncsi_channel *nc;
1490 struct ncsi_package *np;
1491 unsigned long flags;
1492 unsigned int n = 0;
1493
1494 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1495 NCSI_FOR_EACH_CHANNEL(np, nc) {
1496 spin_lock_irqsave(&nc->lock, flags);
1497
1498 /* Channels may be busy, mark dirty instead of
1499			 * kicking if:
1500 * a) not ACTIVE (configured)
1501 * b) in the channel_queue (to be configured)
1502			 * c) its ndev is in the config state
1503 */
1504 if (nc->state != NCSI_CHANNEL_ACTIVE) {
1505 if ((ndp->ndev.state & 0xff00) ==
1506 ncsi_dev_state_config ||
1507 !list_empty(&nc->link)) {
1508 netdev_dbg(nd->dev,
1509 "NCSI: channel %p marked dirty\n",
1510 nc);
1511 nc->reconfigure_needed = true;
1512 }
1513 spin_unlock_irqrestore(&nc->lock, flags);
1514 continue;
1515 }
1516
1517 spin_unlock_irqrestore(&nc->lock, flags);
1518
1519 ncsi_stop_channel_monitor(nc);
1520 spin_lock_irqsave(&nc->lock, flags);
1521 nc->state = NCSI_CHANNEL_INACTIVE;
1522 spin_unlock_irqrestore(&nc->lock, flags);
1523
1524 spin_lock_irqsave(&ndp->lock, flags);
1525 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1526 spin_unlock_irqrestore(&ndp->lock, flags);
1527
1528 netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1529 n++;
1530 }
1531 }
1532
1533 return n;
1534}
1535
1536int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1537{
1538 struct ncsi_dev_priv *ndp;
1539 unsigned int n_vids = 0;
1540 struct vlan_vid *vlan;
1541 struct ncsi_dev *nd;
1542 bool found = false;
1543
1544 if (vid == 0)
1545 return 0;
1546
1547 nd = ncsi_find_dev(dev);
1548 if (!nd) {
1549 netdev_warn(dev, "NCSI: No net_device?\n");
1550 return 0;
1551 }
1552
1553 ndp = TO_NCSI_DEV_PRIV(nd);
1554
1555 /* Add the VLAN id to our internal list */
1556 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1557 n_vids++;
1558 if (vlan->vid == vid) {
1559 netdev_dbg(dev, "NCSI: vid %u already registered\n",
1560 vid);
1561 return 0;
1562 }
1563 }
1564 if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1565 netdev_warn(dev,
1566 "tried to add vlan id %u but NCSI max already registered (%u)\n",
1567 vid, NCSI_MAX_VLAN_VIDS);
1568 return -ENOSPC;
1569 }
1570
1571 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1572 if (!vlan)
1573 return -ENOMEM;
1574
1575 vlan->proto = proto;
1576 vlan->vid = vid;
1577 list_add_rcu(&vlan->list, &ndp->vlan_vids);
1578
1579 netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1580
1581 found = ncsi_kick_channels(ndp) != 0;
1582
1583 return found ? ncsi_process_next_channel(ndp) : 0;
1584}
1585EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1586
1587int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1588{
1589 struct vlan_vid *vlan, *tmp;
1590 struct ncsi_dev_priv *ndp;
1591 struct ncsi_dev *nd;
1592 bool found = false;
1593
1594 if (vid == 0)
1595 return 0;
1596
1597 nd = ncsi_find_dev(dev);
1598 if (!nd) {
1599 netdev_warn(dev, "NCSI: no net_device?\n");
1600 return 0;
1601 }
1602
1603 ndp = TO_NCSI_DEV_PRIV(nd);
1604
1605 /* Remove the VLAN id from our internal list */
1606 list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1607 if (vlan->vid == vid) {
1608 netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1609 list_del_rcu(&vlan->list);
1610 found = true;
1611 kfree(vlan);
1612 }
1613
1614 if (!found) {
1615 netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1616 return -EINVAL;
1617 }
1618
1619 found = ncsi_kick_channels(ndp) != 0;
1620
1621 return found ? ncsi_process_next_channel(ndp) : 0;
1622}
1623EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1624
1625struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1626 void (*handler)(struct ncsi_dev *ndev))
1627{
1628 struct ncsi_dev_priv *ndp;
1629 struct ncsi_dev *nd;
1630 unsigned long flags;
1631 int i;
1632
1633 /* Check if the device has been registered or not */
1634 nd = ncsi_find_dev(dev);
1635 if (nd)
1636 return nd;
1637
1638 /* Create NCSI device */
1639 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1640 if (!ndp)
1641 return NULL;
1642
1643 nd = &ndp->ndev;
1644 nd->state = ncsi_dev_state_registered;
1645 nd->dev = dev;
1646 nd->handler = handler;
1647 ndp->pending_req_num = 0;
1648 INIT_LIST_HEAD(&ndp->channel_queue);
1649 INIT_LIST_HEAD(&ndp->vlan_vids);
1650 INIT_WORK(&ndp->work, ncsi_dev_work);
1651 ndp->package_whitelist = UINT_MAX;
1652
1653 /* Initialize private NCSI device */
1654 spin_lock_init(&ndp->lock);
1655 INIT_LIST_HEAD(&ndp->packages);
1656 ndp->request_id = NCSI_REQ_START_IDX;
1657 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1658 ndp->requests[i].id = i;
1659 ndp->requests[i].ndp = ndp;
1660 timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1661 }
1662
1663 spin_lock_irqsave(&ncsi_dev_lock, flags);
1664 list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1665 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1666
1667 /* Register NCSI packet Rx handler */
1668 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1669 ndp->ptype.func = ncsi_rcv_rsp;
1670 ndp->ptype.dev = dev;
1671 dev_add_pack(&ndp->ptype);
1672
1673 return nd;
1674}
1675EXPORT_SYMBOL_GPL(ncsi_register_dev);
1676
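/* A minimal usage sketch (illustrative only - the handler below is
 * hypothetical, not part of this file). A NIC driver registers an NCSI
 * device once, starts it when the interface is opened, and stops it when
 * the interface goes down:
 *
 *	static void example_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state == ncsi_dev_state_functional)
 *			netdev_info(nd->dev, "NCSI link %s\n",
 *				    nd->link_up ? "up" : "down");
 *	}
 *
 *	probe:  ndev = ncsi_register_dev(netdev, example_ncsi_handler);
 *	open:   ncsi_start_dev(ndev);
 *	stop:   ncsi_stop_dev(ndev);
 *	remove: ncsi_unregister_dev(ndev);
 */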
1677int ncsi_start_dev(struct ncsi_dev *nd)
1678{
1679 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1680
1681 if (nd->state != ncsi_dev_state_registered &&
1682 nd->state != ncsi_dev_state_functional)
1683 return -ENOTTY;
1684
1685 if (!(ndp->flags & NCSI_DEV_PROBED)) {
1686 ndp->package_probe_id = 0;
1687 nd->state = ncsi_dev_state_probe;
1688 schedule_work(&ndp->work);
1689 return 0;
1690 }
1691
1692 return ncsi_reset_dev(nd);
1693}
1694EXPORT_SYMBOL_GPL(ncsi_start_dev);
1695
1696void ncsi_stop_dev(struct ncsi_dev *nd)
1697{
1698 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1699 struct ncsi_package *np;
1700 struct ncsi_channel *nc;
1701 bool chained;
1702 int old_state;
1703 unsigned long flags;
1704
1705 /* Stop the channel monitor on any active channels. Don't reset the
1706 * channel state so we know which were active when ncsi_start_dev()
1707 * is next called.
1708 */
1709 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1710 NCSI_FOR_EACH_CHANNEL(np, nc) {
1711 ncsi_stop_channel_monitor(nc);
1712
1713 spin_lock_irqsave(&nc->lock, flags);
1714 chained = !list_empty(&nc->link);
1715 old_state = nc->state;
1716 spin_unlock_irqrestore(&nc->lock, flags);
1717
1718 WARN_ON_ONCE(chained ||
1719 old_state == NCSI_CHANNEL_INVISIBLE);
1720 }
1721 }
1722
1723 netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1724 ncsi_report_link(ndp, true);
1725}
1726EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1727
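/* Reset the device: flush the channel queue, suspend the currently
 * active channel if there is one, then pick a fresh active channel.
 * If a suspend or configuration is already in flight, just set
 * NCSI_DEV_RESET and let that state machine trigger the reset when it
 * completes.
 */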
1728int ncsi_reset_dev(struct ncsi_dev *nd)
1729{
1730 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1731 struct ncsi_channel *nc, *active, *tmp;
1732 struct ncsi_package *np;
1733 unsigned long flags;
1734
1735 spin_lock_irqsave(&ndp->lock, flags);
1736
1737 if (!(ndp->flags & NCSI_DEV_RESET)) {
1738		/* Reset hasn't been requested yet - check the current state */
1739 switch (nd->state & ncsi_dev_state_major) {
1740 case ncsi_dev_state_registered:
1741 case ncsi_dev_state_probe:
1742 /* Not even probed yet - do nothing */
1743 spin_unlock_irqrestore(&ndp->lock, flags);
1744 return 0;
1745 case ncsi_dev_state_suspend:
1746 case ncsi_dev_state_config:
1747 /* Wait for the channel to finish its suspend/config
1748 * operation; once it finishes it will check for
1749 * NCSI_DEV_RESET and reset the state.
1750 */
1751 ndp->flags |= NCSI_DEV_RESET;
1752 spin_unlock_irqrestore(&ndp->lock, flags);
1753 return 0;
1754 }
1755 } else {
1756 switch (nd->state) {
1757 case ncsi_dev_state_suspend_done:
1758 case ncsi_dev_state_config_done:
1759 case ncsi_dev_state_functional:
1760 /* Ok */
1761 break;
1762 default:
1763			/* A reset operation is already in progress */
1764 spin_unlock_irqrestore(&ndp->lock, flags);
1765 return 0;
1766 }
1767 }
1768
1769 if (!list_empty(&ndp->channel_queue)) {
1770 /* Clear any channel queue we may have interrupted */
1771 list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1772 list_del_init(&nc->link);
1773 }
1774 spin_unlock_irqrestore(&ndp->lock, flags);
1775
1776 active = NULL;
1777 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1778 NCSI_FOR_EACH_CHANNEL(np, nc) {
1779 spin_lock_irqsave(&nc->lock, flags);
1780
1781 if (nc->state == NCSI_CHANNEL_ACTIVE) {
1782 active = nc;
1783 nc->state = NCSI_CHANNEL_INVISIBLE;
1784 spin_unlock_irqrestore(&nc->lock, flags);
1785 ncsi_stop_channel_monitor(nc);
1786 break;
1787 }
1788
1789 spin_unlock_irqrestore(&nc->lock, flags);
1790 }
1791 if (active)
1792 break;
1793 }
1794
1795 if (!active) {
1796 /* Done */
1797 spin_lock_irqsave(&ndp->lock, flags);
1798 ndp->flags &= ~NCSI_DEV_RESET;
1799 spin_unlock_irqrestore(&ndp->lock, flags);
1800 return ncsi_choose_active_channel(ndp);
1801 }
1802
1803 spin_lock_irqsave(&ndp->lock, flags);
1804 ndp->flags |= NCSI_DEV_RESET;
1805 ndp->active_channel = active;
1806 ndp->active_package = active->package;
1807 spin_unlock_irqrestore(&ndp->lock, flags);
1808
1809 nd->state = ncsi_dev_state_suspend;
1810 schedule_work(&ndp->work);
1811 return 0;
1812}
1813
1814void ncsi_unregister_dev(struct ncsi_dev *nd)
1815{
1816 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1817 struct ncsi_package *np, *tmp;
1818 unsigned long flags;
1819
1820 dev_remove_pack(&ndp->ptype);
1821
1822 list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1823 ncsi_remove_package(np);
1824
1825 spin_lock_irqsave(&ncsi_dev_lock, flags);
1826 list_del_rcu(&ndp->node);
1827 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1828
1829 kfree(ndp);
1830}
1831EXPORT_SYMBOL_GPL(ncsi_unregister_dev);