/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/if_team.h>

#define DRV_NAME "team"

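/*
 * Example usage (a sketch, assuming the iproute2 "ip" tool and a port
 * device named eth0):
 *
 *	ip link add name team0 type team
 *	ip link set eth0 down
 *	ip link set eth0 master team0
 *
 * Runtime configuration, such as selecting a mode, is done over the
 * generic netlink family registered at the bottom of this file.
 */
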
/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

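/*
 * A port's team_port is stored as the port netdev's rx_handler_data:
 * team_port_get_rcu() is the fast-path lookup under rcu_read_lock(),
 * team_port_get_rtnl() the control-path lookup under rtnl_lock.
 */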
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * Since the ability to change the mac address of an open port device is
 * checked in team_port_add, this function can be called without checking
 * the return value.
 */
static int __set_port_mac(struct net_device *port_dev,
			  const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
	addr.sa_family = ARPHRD_ETHER;
	return dev_set_mac_address(port_dev, &addr);
}

int team_port_set_orig_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->orig.dev_addr);
}

int team_port_set_team_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->team->dev->dev_addr);
}
EXPORT_SYMBOL(team_port_set_team_mac);


/*******************
 * Options handling
 *******************/

struct team_option *__team_find_option(struct team *team, const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

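/*
 * Register an array of options with a team instance. Every entry is
 * duplicated into team->option_list; on a duplicate name or an
 * allocation failure, all entries added so far are rolled back.
 */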
int __team_options_register(struct team *team,
			    const struct team_option *option,
			    size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		dst_opts[i]->changed = true;
		dst_opts[i]->removed = false;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

rollback:
	for (i = 0; i < option_count; i++)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			del_opt->changed = true;
			del_opt->removed = true;
		}
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team, struct team_option *option,
			   void *arg)
{
	return option->getter(team, arg);
}

static int team_option_set(struct team *team, struct team_option *option,
			   void *arg)
{
	int err;

	err = option->setter(team, arg);
	if (err)
		return err;

	option->changed = true;
	__team_options_change_check(team);
	return err;
}

/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

static struct team_mode *__find_mode(const char *kind)
{
	struct team_mode *mode;

	list_for_each_entry(mode, &mode_list, list) {
		if (strcmp(mode->kind, kind) == 0)
			return mode;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(struct team_mode *mode)
{
	int err = 0;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;
	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		goto unlock;
	}
	list_add_tail(&mode->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

int team_mode_unregister(struct team_mode *mode)
{
	spin_lock(&mode_list_lock);
	list_del_init(&mode->list);
	spin_unlock(&mode_list_lock);
	return 0;
}
EXPORT_SYMBOL(team_mode_unregister);

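/*
 * Look up a mode by kind, trying to load a "team-mode-<kind>" module
 * if it is not registered yet, and pin the owning module so it cannot
 * be unloaded while the mode is in use.
 */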
static struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode *mode;

	spin_lock(&mode_list_lock);
	mode = __find_mode(kind);
	if (!mode) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mode = __find_mode(kind);
	}
	if (mode)
		if (!try_module_get(mode->owner))
			mode = NULL;

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (list_empty(&team->port_list) ||
	    !team->mode || !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (list_empty(&team->port_list) ||
	    !team->mode || !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so
 * there's no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team->mode) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team->mode = NULL;
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

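/*
 * Switch the team to the mode named by kind. All ports must have been
 * removed beforehand, and the new kind must differ from the current one.
 */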
static int team_change_mode(struct team *team, const char *kind)
{
	struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team->mode && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;

	res = team->ops.receive(team, port, skb);
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Add/delete port to the team port list. Write guarded by rtnl_lock.
 * Takes care of correct port->index setup (might be racy).
 */
static void team_port_list_add_port(struct team *team,
				    struct team_port *port)
{
	port->index = team->port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	list_add_tail_rcu(&port->list, &team->port_list);
}

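/*
 * After the port at rm_index is removed, shift every following port one
 * index down and rehash it so that team_get_port_by_index() lookups
 * stay consistent.
 */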
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_list_del_port(struct team *team,
				    struct team_port *port)
{
	int rm_index = port->index;

	hlist_del_rcu(&port->hlist);
	list_del_rcu(&port->list);
	__reconstruct_port_hlist(team, rm_index);
	team->port_count--;
}

#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

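/*
 * Recompute the team device's vlan_features and hard_header_len from
 * the current set of ports: features are folded together via
 * netdev_increment_features(), the header length is the maximum.
 */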
static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup);

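/*
 * Enslave port_dev to the team. The device must be a non-loopback
 * Ethernet device and must be down; its original MTU and MAC address
 * are saved so they can be restored when the port is removed again.
 */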
static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK ||
	    port_dev->type != ARPHRD_ETHER) {
		netdev_err(dev, "Device %s is of an unsupported type\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = netdev_set_master(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set master\n", portname);
		goto err_set_master;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	team_port_list_add_port(team, port);
	team_adjust_ops(team);
	__team_compute_features(team);
	__team_port_change_check(port, !!netif_carrier_ok(port_dev));

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_handler_register:
	netdev_set_master(port_dev, NULL);

err_set_master:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_mac(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	port->removed = true;
	__team_port_change_check(port, false);
	team_port_list_del_port(team, port);
	team_adjust_ops(team);
	netdev_rx_handler_unregister(port_dev);
	netdev_set_master(port_dev, NULL);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);
	team_port_set_orig_mac(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	synchronize_rcu();
	kfree(port);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static const char team_no_mode_kind[] = "*NOMODE*";

static int team_mode_option_get(struct team *team, void *arg)
{
	const char **str = arg;

	*str = team->mode ? team->mode->kind : team_no_mode_kind;
	return 0;
}

static int team_mode_option_set(struct team *team, void *arg)
{
	const char **str = arg;

	return team_change_mode(team, *str);
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
};

static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	return 0;

err_options_register:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	return 0;
}

static int team_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync(port->dev, dev);
		dev_mc_sync(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct sockaddr *addr = p;

	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_mac)
			team->ops.port_change_mac(team, port);
	rcu_read_unlock();
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

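/*
 * Fold the per-cpu counters into stats. The 64bit counters are read
 * under the u64_stats seqcount; rx_dropped/tx_dropped are plain u32s
 * updated without it and may therefore be slightly stale.
 */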
static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	return stats;
}

static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	rcu_read_unlock();

	return 0;
}

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

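/*
 * Compute the feature set the team device may advertise: one-for-all
 * features are cleared and all-for-all features set up front, then the
 * result is narrowed by what every port can do.
 */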
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init = team_init,
	.ndo_uninit = team_uninit,
	.ndo_open = team_open,
	.ndo_stop = team_close,
	.ndo_start_xmit = team_xmit,
	.ndo_change_rx_flags = team_change_rx_flags,
	.ndo_set_rx_mode = team_set_rx_mode,
	.ndo_set_mac_address = team_set_mac_address,
	.ndo_change_mtu = team_change_mtu,
	.ndo_get_stats64 = team_get_stats64,
	.ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
	.ndo_add_slave = team_add_slave,
	.ndo_del_slave = team_del_slave,
	.ndo_fix_features = team_fix_features,
};


/***********************
 * rt netlink interface
 ***********************/

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor = team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Leave this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind = DRV_NAME,
	.priv_size = sizeof(struct team),
	.setup = team_setup,
	.newlink = team_newlink,
	.validate = team_validate,
};


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id = GENL_ID_GENERATE,
	.name = TEAM_GENL_NAME,
	.version = TEAM_GENL_VERSION,
	.maxattr = TEAM_ATTR_MAX,
	.netnsok = true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA] = {
		.type = NLA_BINARY,
		.len = TEAM_STRING_MAX_LEN,
	},
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be guarded by the following two functions.
 * Since dev gets held here, it is ensured that dev won't disappear in
 * between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

static int team_nl_send_generic(struct genl_info *info, struct team *team,
				int (*fill_func)(struct sk_buff *skb,
						 struct genl_info *info,
						 int flags, struct team *team))
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = fill_func(skb, info, NLM_F_ACK, team);
	if (err < 0)
		goto err_fill;

	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

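/*
 * Build a TEAM_CMD_OPTIONS_GET message. With fillall, every option is
 * dumped; otherwise only options with their changed flag set are
 * included, and the flag is cleared as a side effect of dumping.
 */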
static int team_nl_fill_options_get(struct sk_buff *skb,
				    u32 pid, u32 seq, int flags,
				    struct team *team, bool fillall)
{
	struct nlattr *option_list;
	void *hdr;
	struct team_option *option;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr)
		return -EMSGSIZE;

	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		return -EMSGSIZE;

	list_for_each_entry(option, &team->option_list, list) {
		struct nlattr *option_item;
		long arg;

		/* Include only changed options if fill all mode is not on */
		if (!fillall && !option->changed)
			continue;
		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
		if (!option_item)
			goto nla_put_failure;
		NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
		if (option->changed) {
			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
			option->changed = false;
		}
		if (option->removed)
			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
		switch (option->type) {
		case TEAM_OPTION_TYPE_U32:
			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
			team_option_get(team, option, &arg);
			NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
			break;
		case TEAM_OPTION_TYPE_STRING:
			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
			team_option_get(team, option, &arg);
			NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
				       (char *) arg);
			break;
		default:
			BUG();
		}
		nla_nest_end(skb, option_item);
	}

	nla_nest_end(skb, option_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_options_get_all(struct sk_buff *skb,
					struct genl_info *info, int flags,
					struct team *team)
{
	return team_nl_fill_options_get(skb, info->snd_pid,
					info->snd_seq, NLM_F_ACK,
					team, true);
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);

	team_nl_team_put(team);

	return err;
}

static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = -EINVAL;
	if (!info->attrs[TEAM_ATTR_LIST_OPTION])
		goto team_put;

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
		enum team_option_type opt_type;
		struct team_option *option;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
		    !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		default:
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
		list_for_each_entry(option, &team->option_list, list) {
			long arg;
			struct nlattr *opt_data_attr;

			if (option->type != opt_type ||
			    strcmp(option->name, opt_name))
				continue;
			opt_found = true;
			opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				arg = nla_get_u32(opt_data_attr);
				break;
			case TEAM_OPTION_TYPE_STRING:
				arg = (long) nla_data(opt_data_attr);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, option, &arg);
			if (err)
				goto team_put;
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

team_put:
	team_nl_team_put(team);

	return err;
}

static int team_nl_fill_port_list_get(struct sk_buff *skb,
				      u32 pid, u32 seq, int flags,
				      struct team *team,
				      bool fillall)
{
	struct nlattr *port_list;
	void *hdr;
	struct team_port *port;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr)
		return -EMSGSIZE;

	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		return -EMSGSIZE;

	list_for_each_entry(port, &team->port_list, list) {
		struct nlattr *port_item;

		/* Include only changed ports if fill all mode is not on */
		if (!fillall && !port->changed)
			continue;
		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
		if (!port_item)
			goto nla_put_failure;
		NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
		if (port->changed) {
			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
			port->changed = false;
		}
		if (port->removed)
			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
		if (port->linkup)
			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
		NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
		NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
		nla_nest_end(skb, port_item);
	}

	nla_nest_end(skb, port_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
					  struct genl_info *info, int flags,
					  struct team *team)
{
	return team_nl_fill_port_list_get(skb, info->snd_pid,
					  info->snd_seq, NLM_F_ACK,
					  team, true);
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);

	team_nl_team_put(team);

	return err;
}

static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};

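/*
 * Event notifications: changed options or ports are multicast to the
 * change-event group so a userspace listener (e.g. a team daemon) can
 * track state without polling.
 */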
static int team_nl_send_event_options_get(struct team *team)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_send_event_port_list_get(struct team *team)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team)
{
	int err;

	err = team_nl_send_event_options_get(team);
	if (err)
		netdev_warn(team->dev, "Failed to send options change via netlink\n");
}

/* rtnl lock is held */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	int err;

	if (!port->removed && port->linkup == linkup)
		return;

	port->changed = true;
	port->linkup = linkup;
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->speed = ethtool_cmd_speed(&ecmd);
			port->duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->speed = 0;
	port->duplex = 0;

send_event:
	err = team_nl_send_event_port_list_get(port->team);
	if (err)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
			    port->dev->name);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}

/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_CHANGEMTU:
		/* Forbid changing the mtu of an underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);