// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

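/* A protected frame is the Ethernet header immediately followed by the
 * SecTAG: the TCI/AN octet, the short-length octet, the 32-bit packet
 * number, and an optional 8-byte SCI (present iff the TCI SC bit is set).
 */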
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES 0x40 /* end station */
#define MACSEC_TCI_SC 0x20 /* SCI present */
#define MACSEC_TCI_SCB 0x10 /* epon */
#define MACSEC_TCI_E 0x08 /* encryption */
#define MACSEC_TCI_C 0x04 /* changed text */
#define MACSEC_AN_MASK 0x03 /* association number */
#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

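/* 96-bit GCM IV: the 64-bit SCI followed by the 32-bit packet number,
 * both in network byte order.
 */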
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: flag indicating if this SA is in use for receive
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	refcount_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: flag indicating if this SA is in use for transmit
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: channel is active
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: end station bit to set in the SecTAG TCI
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

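/* Per-packet state stashed in skb->cb while a crypto request is in
 * flight; the BUILD_BUG_ON in macsec_skb_cb() guarantees it fits.
 */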
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

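/* The SCI is encoded in the SecTAG when explicitly requested, or when
 * neither the ES nor the SCB shortcut is in use and more than one
 * receive SC exists, so peers cannot infer the transmitter.
 */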
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

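/* Reserve the next PN for transmission. PN 0 is invalid; once the 32-bit
 * counter wraps, the SA is retired and, if frames must be protected, the
 * SecY stops being operational until a fresh SA is installed.
 */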
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

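/* Allocate the AEAD request, the IV and the scatterlist in a single
 * atomic allocation, laid out back to back with suitable alignment, so
 * that one aead_request_free() releases everything.
 */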
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

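/* TX path: pick the encoding SA, make room for SecTAG and ICV, fill the
 * SecTAG, then hand the frame to the AEAD transform. The operation may
 * complete synchronously or asynchronously via macsec_encrypt_done().
 */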
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

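/* Post-decryption bookkeeping: re-check the replay window under the SA
 * lock, account validated/decrypted octets, decide whether an invalid
 * frame may still be delivered, and advance next_pn (figure 10-5).
 */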
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

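/* RX decrypt: build the AEAD request over the whole frame. With
 * confidentiality, the Ethernet + MACsec headers are the AAD and the
 * payload is ciphertext; integrity-only authenticates everything but
 * the ICV. Completes synchronously or via macsec_decrypt_done().
 */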
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

	rcu_read_unlock();
}

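/* rx_handler attached to the underlying device: validate the SecTAG,
 * look up the SC by SCI across all SecYs, pick the SA by AN, apply the
 * replay check, decrypt/verify, then deliver on the matching MACsec
 * netdevice (see 9.12 and 10.6).
 */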
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

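/* Create a receive SC under RTNL; the SCI must be unique across all
 * SecYs stacked on the same underlying device.
 */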
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

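/* Updates may change the PN and active state only; changing the key or
 * key id requires deleting and re-creating the SA.
 */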
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();
	return 0;
}

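/* Toggling an RX SC's active flag keeps secy->n_rx_sc, the count of
 * active receive channels, consistent for the receive path.
 */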
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}

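/* Per-SA counters are plain u32s, so they can be summed across CPUs
 * without the u64_stats seqcount used for the 64-bit SC/SecY counters
 * below, where 64-bit reads could tear on 32-bit architectures.
 */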
static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sa_stats(struct sk_buff *skb,
		 struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

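/* The 64-bit SC and SecY counters are snapshotted per CPU inside a
 * u64_stats_fetch_begin_irq()/retry loop and then accumulated; the copy
 * is retried if a writer updated the counters meanwhile.
 * noinline_for_stack keeps the large on-stack temporaries out of the
 * dump_secy() frame.
 */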
static noinline_for_stack int
copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

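/* Serialize the SecY configuration as a nested MACSEC_ATTR_SECY
 * attribute. Returns nonzero on failure, after cancelling the nest so
 * no partial attribute is left in the message.
 */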
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}

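/* Emit one multipart message per SecY: device index, SecY config,
 * TX SC and SecY statistics, the TX SA list, and the RX SC list with
 * each SC's statistics and SA list. Any put failure unwinds the open
 * nests and cancels the whole message.
 */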
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */

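/* Dump walks all netdevs in the namespace under RTNL, resuming from
 * cb->args[0]. macsec_generation is bumped on every link creation and
 * deletion and used as cb->seq, so userspace sees NLM_F_DUMP_INTR if
 * the set of devices changed mid-dump.
 */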
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};

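/* Transmit path: with frame protection disabled, count the frame as
 * untagged and pass it straight to the underlying device; when the SecY
 * is not operational, drop. Otherwise encrypt (macsec_encrypt() may
 * complete asynchronously and return -EINPROGRESS) and hand the
 * protected frame to the real device.
 */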
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* IEEE 802.1AE-2006 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		DEV_STATS_INC(dev, tx_dropped);
		return NETDEV_TX_OK;
	}

	len = skb->len;
	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			DEV_STATS_INC(dev, tx_dropped);
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

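/* An SCI is the 64-bit concatenation of a MAC address and a 16-bit
 * port number (IEEE 802.1AE), as assembled by make_sci();
 * MACSEC_PORT_ES is used whenever no explicit port was configured.
 */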
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
	return 0;
}

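/* The MACsec netdev MTU is limited by what the lower device can carry
 * once the SecTAG (including SCI) and the ICV are added; see
 * macsec_extra_len() and secy->icv_len for the exact overhead.
 */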
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	return macsec_changelink_common(dev, data);
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

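/* Allocate the per-CPU SecY and TX SC statistics and set the SecY
 * defaults: operational, default cipher key length, frame protection
 * enabled, replay protection disabled.
 */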
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

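/* Create a macsec device on top of an Ethernet lower device. The SCI
 * comes from IFLA_MACSEC_SCI, or is derived from the (post-register)
 * MAC address and IFLA_MACSEC_PORT / MACSEC_PORT_ES. Failures unwind
 * in reverse order: del_dev -> unlink -> unregister.
 */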
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	/* send_sci must be true when the SCI to transmit is explicitly set */
	if (data && data[IFLA_MACSEC_SCI] && data[IFLA_MACSEC_INC_SCI]) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

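/* rtnl_link_ops validation: non-default ICV lengths are probed by
 * allocating a throwaway AEAD transform, the cipher suite must be one
 * of the GCM-AES variants, the ES/SCB/INC_SCI TCI bits are mutually
 * exclusive, and enabling replay protection requires an explicit
 * window.
 */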
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind = "macsec",
	.priv_size = sizeof(struct macsec_dev),
	.maxtype = IFLA_MACSEC_MAX,
	.policy = macsec_rtnl_policy,
	.setup = macsec_setup,
	.validate = macsec_validate_attr,
	.newlink = macsec_newlink,
	.changelink = macsec_changelink,
	.dellink = macsec_dellink,
	.get_size = macsec_get_size,
	.fill_info = macsec_fill_info,
	.get_link_net = macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

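/* Lower-device notifier: propagate operstate changes to the stacked
 * macsec devices, tear all of them down when the real device
 * unregisters (also releasing the rx_handler), and clamp their MTU
 * when the lower MTU shrinks.
 */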
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");