// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present; they are followed by the IV, the
 * request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
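/*
 * Resulting tmp buffer layout (sketch, each part aligned as computed
 * below):
 *
 *	[ seqhi, 4 bytes (ESN only) ]
 *	[ IV, crypto_aead_ivsize(aead) bytes ]
 *	[ struct aead_request + crypto_aead_reqsize(aead) ]
 *	[ struct scatterlist * nfrags ]
 *
 * The esp_tmp_*() helpers below recompute these offsets from the same
 * alignment rules instead of storing them.
 */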
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct xfrm_offload *xo = xfrm_offload(skb);

		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		if (xo)
			esph->seq_no = htonl(xo->seq.hi);
		else
			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

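/*
 * ESP trailer as built below (sketch, cf. RFC 4303 section 2):
 *
 *	[ TFC padding, tfclen bytes, zeroed ]
 *	[ self-describing padding 1, 2, ..., plen - 2 ]
 *	[ pad length = plen - 2 ] [ next header = proto ]
 *
 * plen therefore includes the two trailing bytes; the ICV is appended
 * separately by the AEAD transform.
 */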
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

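/*
 * Append the trailer using the cheapest strategy available (sketch):
 * use existing tailroom when the skb is not cloned, otherwise try to
 * park the trailer in a page fragment from x->xfrag, and fall back to
 * skb_cow_data() when neither is possible.  Returns the number of
 * scatterlist entries needed, or a negative errno.
 */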
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

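	/*
	 * Seed the IV with the low-order bytes of the 64-bit sequence
	 * number (a sketch of the intent: for an 8-byte IV this copies
	 * the whole big-endian seqno, for shorter IVs only its least
	 * significant bytes).
	 */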
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;
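	/*
	 * Worked example (assumed figures): cbc(aes) has a block size
	 * of 16 and, with hmac(sha1), alen is 12.  For skb->len 100 and
	 * no TFC padding: clen = ALIGN(100 + 2, 16) = 112, plen = 12
	 * (10 pad bytes plus the pad-length and next-header bytes), so
	 * tailen = 0 + 12 + 12 = 24 bytes appended by esp6_output_head().
	 */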

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

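/*
 * Strip the ESP trailer after decryption (sketch): the two bytes just
 * before the ICV are the pad-length and next-header fields, so the
 * packet is trimmed by alen + padlen + 2 and the next-header value is
 * returned to the caller.
 */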
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

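/*
 * ESN input rewrite (sketch):
 *
 *	on the wire:	[ SPI ] [ seq_lo ] [ IV ] ...
 *	for the ICV:	[ SPI ] [ seq_hi ] [ seq_lo ] [ IV ] ...
 *
 * Four bytes are pushed so that the high sequence bits are covered by
 * the authentication tag; the displaced bytes are parked in *seqhi and
 * put back by esp_input_restore_header() once crypto completes.
 */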
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

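	/*
	 * e.g. geniv "seqiv" wrapped around "rfc4106(gcm(aes))" yields
	 * the template name "seqiv(rfc4106(gcm(aes)))" (an illustrative
	 * combination, not the only one).
	 */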
	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

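	/*
	 * e.g. hmac(sha1) + cbc(aes) with geniv "echainiv" becomes
	 * "echainiv(authenc(hmac(sha1),cbc(aes)))", or the authencesn
	 * variant when ESN is enabled (illustrative names, assuming the
	 * usual geniv from the xfrm algorithm table).
	 */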
	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

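	/*
	 * authenc() key blob layout built below (sketch):
	 *
	 *	[ struct rtattr, CRYPTO_AUTHENC_KEYA_PARAM ]
	 *	[ struct crypto_authenc_key_param (enckeylen) ]
	 *	[ authentication key, if x->aalg ]
	 *	[ encryption key ]
	 */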
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);