/* -*- linux-c -*-
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: netdev@vger.kernel.org
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:	Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
 *		  - reset skb->pkt_type on incoming packets when MAC was changed
 *		  - see that changed MAC is saddr for outgoing packets
 *		Oct 20, 2001:  Ard van Breeman:
 *		  - Fix MC-list, finally.
 *		  - Flush MC-list on VLAN destroy.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <net/arp.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
#include <linux/netfilter.h>
#include <net/netfilter/nf_flow_table.h>
#endif

#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <linux/netpoll.h>

/*
 *	Create the VLAN header for an arbitrary protocol layer
 *
 *	saddr=NULL	means use device source address
 *	daddr=NULL	means leave destination address (eg unresolved arp)
 *
 *	This is called when the SKB is moving down the stack towards the
 *	physical devices.
 */
static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				unsigned short type,
				const void *daddr, const void *saddr,
				unsigned int len)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_hdr *vhdr;
	unsigned int vhdrlen = 0;
	u16 vlan_tci = 0;
	int rc;

	if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
		vhdr = skb_push(skb, VLAN_HLEN);

		vlan_tci = vlan->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
		vhdr->h_vlan_TCI = htons(vlan_tci);

		/*
		 *  Set the protocol type. For a packet of type ETH_P_802_3/2 we
		 *  put the length in here instead.
		 */
		if (type != ETH_P_802_3 && type != ETH_P_802_2)
			vhdr->h_vlan_encapsulated_proto = htons(type);
		else
			vhdr->h_vlan_encapsulated_proto = htons(len);

		skb->protocol = vlan->vlan_proto;
		type = ntohs(vlan->vlan_proto);
		vhdrlen = VLAN_HLEN;
	}

	/* Before delegating work to the lower layer, enter our MAC-address */
	if (saddr == NULL)
		saddr = dev->dev_addr;

	/* Now make the underlying real hard header */
	dev = vlan->real_dev;
	rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
	if (rc > 0)
		rc += vhdrlen;
	return rc;
}

static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	if (vlan->netpoll)
		netpoll_send_skb(vlan->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}

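/* Transmit path of the VLAN device: if vlan_dev_hard_header() did not
 * already insert the tag into the frame, put it into the skb's
 * hardware-accel metadata, then hand the skb to the real device and
 * account the result in the per-CPU statistics.
 */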
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
	unsigned int len;
	int ret;

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (veth->h_vlan_proto != vlan->vlan_proto ||
	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
		u16 vlan_tci;
		vlan_tci = vlan->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
		__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
	}

	skb->dev = vlan->real_dev;
	len = skb->len;
	if (unlikely(netpoll_tx_running(dev)))
		return vlan_netpoll_send_skb(vlan, skb);

	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct vlan_pcpu_stats *stats;

		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
	}

	return ret;
}

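/* The VLAN device cannot exceed the real device's MTU, minus the 4-byte
 * tag when the real device does not absorb it (netif_reduces_vlan_mtu).
 */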
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	unsigned int max_mtu = real_dev->mtu;

	if (netif_reduces_vlan_mtu(real_dev))
		max_mtu -= VLAN_HLEN;
	if (max_mtu < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

void vlan_dev_set_ingress_priority(const struct net_device *dev,
				   u32 skb_prio, u16 vlan_prio)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
		vlan->nr_ingress_mappings--;
	else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
		vlan->nr_ingress_mappings++;

	vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
}

int vlan_dev_set_egress_priority(const struct net_device *dev,
				 u32 skb_prio, u16 vlan_prio)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_priority_tci_mapping *mp = NULL;
	struct vlan_priority_tci_mapping *np;
	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;

	/* See if a priority mapping exists.. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	while (mp) {
		if (mp->priority == skb_prio) {
			if (mp->vlan_qos && !vlan_qos)
				vlan->nr_egress_mappings--;
			else if (!mp->vlan_qos && vlan_qos)
				vlan->nr_egress_mappings++;
			mp->vlan_qos = vlan_qos;
			return 0;
		}
		mp = mp->next;
	}

	/* Create a new mapping then. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
	if (!np)
		return -ENOBUFS;

	np->next = mp;
	np->priority = skb_prio;
	np->vlan_qos = vlan_qos;
	/* Before inserting this element in hash table, make sure all its fields
	 * are committed to memory.
	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
	 */
	smp_wmb();
	vlan->egress_priority_map[skb_prio & 0xF] = np;
	if (vlan_qos)
		vlan->nr_egress_mappings++;
	return 0;
}

/* Flags are defined in the vlan_flags enum in
 * include/uapi/linux/if_vlan.h file.
 */
int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	u32 old_flags = vlan->flags;

	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
		     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
		return -EINVAL;

	vlan->flags = (old_flags & ~mask) | (flags & mask);

	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
		if (vlan->flags & VLAN_FLAG_GVRP)
			vlan_gvrp_request_join(dev);
		else
			vlan_gvrp_request_leave(dev);
	}

	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
		if (vlan->flags & VLAN_FLAG_MVRP)
			vlan_mvrp_request_join(dev);
		else
			vlan_mvrp_request_leave(dev);
	}
	return 0;
}

void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
{
	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
}

bool vlan_dev_inherit_address(struct net_device *dev,
			      struct net_device *real_dev)
{
	if (dev->addr_assign_type != NET_ADDR_STOLEN)
		return false;

	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return true;
}

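/* ndo_open: program our unicast address and RX flags into the real device,
 * request GVRP/MVRP membership if configured, and mirror the carrier state.
 */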
static int vlan_dev_open(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP) &&
	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
	    !vlan_dev_inherit_address(dev, real_dev)) {
		err = dev_uc_add(real_dev, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);

	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_join(dev);

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_join(dev);

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);
	return 0;

clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
		dev_uc_del(real_dev, dev->dev_addr);
out:
	netif_carrier_off(dev);
	return err;
}

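/* ndo_stop: undo everything vlan_dev_open() programmed into the real device. */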
static int vlan_dev_stop(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
		dev_uc_del(real_dev, dev->dev_addr);

	netif_carrier_off(dev);
	return 0;
}

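/* While the device is up, keep the real device's unicast filter in sync
 * with our MAC address.
 */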
static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
		err = dev_uc_add(real_dev, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
		dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

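/* Forward hardware-timestamping and MII ioctls to the real device. */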
static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	struct ifreq ifrr;
	int err = -EOPNOTSUPP;

	strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
	ifrr.ifr_ifru = ifr->ifr_ifru;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (!net_eq(dev_net(dev), &init_net))
			break;
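		/* fall through */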
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
	case SIOCGHWTSTAMP:
		if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
			err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
		break;
	}

	if (!err)
		ifr->ifr_ifru = ifrr.ifr_ifru;

	return err;
}

static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int err = 0;

	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
		err = ops->ndo_neigh_setup(real_dev, pa);

	return err;
}

#if IS_ENABLED(CONFIG_FCOE)
static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
				   struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = 0;

	if (ops->ndo_fcoe_ddp_setup)
		rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);

	return rc;
}

static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int len = 0;

	if (ops->ndo_fcoe_ddp_done)
		len = ops->ndo_fcoe_ddp_done(real_dev, xid);

	return len;
}

static int vlan_dev_fcoe_enable(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = -EINVAL;

	if (ops->ndo_fcoe_enable)
		rc = ops->ndo_fcoe_enable(real_dev);
	return rc;
}

static int vlan_dev_fcoe_disable(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = -EINVAL;

	if (ops->ndo_fcoe_disable)
		rc = ops->ndo_fcoe_disable(real_dev);
	return rc;
}

static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = -EINVAL;

	if (ops->ndo_fcoe_get_wwn)
		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
	return rc;
}

static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
				    struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = 0;

	if (ops->ndo_fcoe_ddp_target)
		rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);

	return rc;
}
#endif

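/* Propagate ALLMULTI/PROMISC changes to the real device while we are up. */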
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
		if (change & IFF_PROMISC)
			dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
	}
}

static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}

/*
 * vlan network devices have devices nesting below them, and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key vlan_netdev_xmit_lock_key;
static struct lock_class_key vlan_netdev_addr_lock_key;

static void vlan_dev_set_lockdep_one(struct net_device *dev,
				     struct netdev_queue *txq,
				     void *_subclass)
{
	lockdep_set_class_and_subclass(&txq->_xmit_lock,
				       &vlan_netdev_xmit_lock_key,
				       *(int *)_subclass);
}

static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
{
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &vlan_netdev_addr_lock_key,
				       subclass);
	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
}

static int vlan_dev_get_lock_subclass(struct net_device *dev)
{
	return vlan_dev_priv(dev)->nest_level;
}

static const struct header_ops vlan_header_ops = {
	.create = vlan_dev_hard_header,
	.parse = eth_header_parse,
};

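/* Used when the real device can tag in hardware: build a plain Ethernet
 * header on the real device and leave tagging to the transmit path.
 */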
static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
				     unsigned short type,
				     const void *daddr, const void *saddr,
				     unsigned int len)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;

	if (saddr == NULL)
		saddr = dev->dev_addr;

	return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}

static const struct header_ops vlan_passthru_header_ops = {
	.create = vlan_passthru_hard_header,
	.parse = eth_header_parse,
};

static struct device_type vlan_type = {
	.name = "vlan",
};

static const struct net_device_ops vlan_netdev_ops;

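/* ndo_init: inherit flags, features, addresses and header_ops from the
 * real device and allocate the per-CPU statistics.
 */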
static int vlan_dev_init(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	netif_carrier_off(dev);

	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					 IFF_MASTER | IFF_SLAVE);
	dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
					 (1<<__LINK_STATE_DORMANT))) |
		     (1<<__LINK_STATE_PRESENT);

	dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
			   NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
			   NETIF_F_ALL_FCOE;

	dev->features |= dev->hw_features | NETIF_F_LLTX;
	dev->gso_max_size = real_dev->gso_max_size;
	dev->gso_max_segs = real_dev->gso_max_segs;
	if (dev->features & NETIF_F_VLAN_FEATURES)
		netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");

	dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;

	/* ipv6 shared card related stuff */
	dev->dev_id = real_dev->dev_id;

	if (is_zero_ether_addr(dev->dev_addr)) {
		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_STOLEN;
	}
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

#if IS_ENABLED(CONFIG_FCOE)
	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif

	dev->needed_headroom = real_dev->needed_headroom;
	if (vlan_hw_offload_capable(real_dev->features,
				    vlan_dev_priv(dev)->vlan_proto)) {
		dev->header_ops      = &vlan_passthru_header_ops;
		dev->hard_header_len = real_dev->hard_header_len;
	} else {
		dev->header_ops      = &vlan_header_ops;
		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
	}

	dev->netdev_ops = &vlan_netdev_ops;

	SET_NETDEV_DEVTYPE(dev, &vlan_type);

	vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));

	vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
		return -ENOMEM;

	return 0;
}

/* Note: this function might be called multiple times for the same device. */
void vlan_dev_uninit(struct net_device *dev)
{
	struct vlan_priority_tci_mapping *pm;
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
		while ((pm = vlan->egress_priority_map[i]) != NULL) {
			vlan->egress_priority_map[i] = pm->next;
			kfree(pm);
		}
	}
}

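/* Limit our features to what the real device can do for VLAN traffic,
 * while keeping the software fallbacks (and LLTX) available.
 */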
static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	netdev_features_t old_features = features;
	netdev_features_t lower_features;

	lower_features = netdev_intersect_features((real_dev->vlan_features |
						    NETIF_F_RXCSUM),
						   real_dev->features);

	/* Add HW_CSUM setting to preserve user ability to control
	 * checksum offload on the vlan device.
	 */
	if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
		lower_features |= NETIF_F_HW_CSUM;
	features = netdev_intersect_features(features, lower_features);
	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
	features |= NETIF_F_LLTX;

	return features;
}

static int vlan_ethtool_get_link_ksettings(struct net_device *dev,
					   struct ethtool_link_ksettings *cmd)
{
	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	return __ethtool_get_link_ksettings(vlan->real_dev, cmd);
}

static void vlan_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
	strlcpy(info->version, vlan_version, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

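/* Report timestamping capabilities: prefer the PHY's, then the real
 * device's ethtool op, falling back to software timestamps.
 */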
static int vlan_ethtool_get_ts_info(struct net_device *dev,
				    struct ethtool_ts_info *info)
{
	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
	struct phy_device *phydev = vlan->real_dev->phydev;

	if (phydev && phydev->drv && phydev->drv->ts_info) {
		return phydev->drv->ts_info(phydev, info);
	} else if (ops->get_ts_info) {
		return ops->get_ts_info(vlan->real_dev, info);
	} else {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
	}

	return 0;
}

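/* Fold the per-CPU counters into a single rtnl_link_stats64. */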
static void vlan_dev_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	struct vlan_pcpu_stats *p;
	u32 rx_errors = 0, tx_dropped = 0;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rxpackets = p->rx_packets;
			rxbytes = p->rx_bytes;
			rxmulticast = p->rx_multicast;
			txpackets = p->tx_packets;
			txbytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->multicast += rxmulticast;
		stats->tx_packets += txpackets;
		stats->tx_bytes += txbytes;
		/* rx_errors & tx_dropped are u32 */
		rx_errors += p->rx_errors;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_errors = rx_errors;
	stats->tx_dropped = tx_dropped;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void vlan_dev_poll_controller(struct net_device *dev)
{
	return;
}

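/* Attach a netpoll instance to the real device on behalf of the VLAN device. */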
static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	err = -ENOMEM;
	if (!netpoll)
		goto out;

	err = __netpoll_setup(netpoll, real_dev);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	vlan->netpoll = netpoll;

out:
	return err;
}

static void vlan_dev_netpoll_cleanup(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct netpoll *netpoll = vlan->netpoll;

	if (!netpoll)
		return;

	vlan->netpoll = NULL;

	__netpoll_free_async(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int vlan_dev_get_iflink(const struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	return real_dev->ifindex;
}

#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
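/* Describe this VLAN hop for flow offload: record the tag and continue the
 * path walk on the real device.
 */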
static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path)
{
	struct net_device *dev = path->dev;
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	if (path->flags & FLOW_OFFLOAD_PATH_VLAN)
		return -EEXIST;

	path->flags |= FLOW_OFFLOAD_PATH_VLAN;
	path->vlan_proto = vlan->vlan_proto;
	path->vlan_id = vlan->vlan_id;
	path->dev = vlan->real_dev;

	if (vlan->real_dev->netdev_ops->ndo_flow_offload_check)
		return vlan->real_dev->netdev_ops->ndo_flow_offload_check(path);

	return 0;
}
#endif /* CONFIG_NF_FLOW_TABLE */

static const struct ethtool_ops vlan_ethtool_ops = {
	.get_link_ksettings = vlan_ethtool_get_link_ksettings,
	.get_drvinfo = vlan_ethtool_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = vlan_ethtool_get_ts_info,
};

static const struct net_device_ops vlan_netdev_ops = {
	.ndo_change_mtu = vlan_dev_change_mtu,
	.ndo_init = vlan_dev_init,
	.ndo_uninit = vlan_dev_uninit,
	.ndo_open = vlan_dev_open,
	.ndo_stop = vlan_dev_stop,
	.ndo_start_xmit = vlan_dev_hard_start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = vlan_dev_set_mac_address,
	.ndo_set_rx_mode = vlan_dev_set_rx_mode,
	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
	.ndo_do_ioctl = vlan_dev_ioctl,
	.ndo_neigh_setup = vlan_dev_neigh_setup,
	.ndo_get_stats64 = vlan_dev_get_stats64,
#if IS_ENABLED(CONFIG_FCOE)
	.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
	.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
	.ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = vlan_dev_poll_controller,
	.ndo_netpoll_setup = vlan_dev_netpoll_setup,
	.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
#endif
	.ndo_fix_features = vlan_dev_fix_features,
	.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
	.ndo_get_iflink = vlan_dev_get_iflink,
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
	.ndo_flow_offload_check = vlan_dev_flow_offload_check,
#endif
};

static void vlan_dev_free(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	free_percpu(vlan->vlan_pcpu_stats);
	vlan->vlan_pcpu_stats = NULL;
}

void vlan_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);

	dev->netdev_ops = &vlan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = vlan_dev_free;
	dev->ethtool_ops = &vlan_ethtool_ops;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	eth_zero_addr(dev->broadcast);
}