#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

#include <linux/highmem.h>

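/* Per-packet scratch state stashed in skb->cb: the generic xfrm control
 * block plus a pointer to the temporary buffer from esp_alloc_tmp().
 */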
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

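/* The esp_tmp_* helpers carve the buffer from esp_alloc_tmp() into its
 * aligned pieces: optional extra (ESN) space, IV, AEAD request and SG list,
 * in that order.
 */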
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

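/* Drop the page references taken for a separate destination SG list when
 * the packet was not encrypted in place.
 */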
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

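/* Completion callback for asynchronous encryption: release the scratch
 * buffer, then resume transmission on either the device-offload path
 * (XFRM_DEV_RESUME) or the normal xfrm output path.
 */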
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME))
		x = skb->sp->xvec[skb->sp->len - 1];
	else
		x = skb_dst(skb)->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

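/* Build the RFC 4303 self-describing trailer: optional zeroed TFC padding,
 * pad bytes 1, 2, 3, ..., then the pad-length and next-header bytes.
 */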
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

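/* Prepend the UDP header used for NAT traversal (cf. RFC 3948); for
 * UDP_ENCAP_ESPINUDP_NON_IKE an extra pair of zero words sits between
 * the UDP header and the ESP header.
 */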
static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	int encap_type;
	struct udphdr *uh;
	__be32 *udpdata32;
	__be16 sport, dport;
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph = esp->esph;
	unsigned int len;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
		return -EMSGSIZE;

	uh = (struct udphdr *)esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = (struct ip_esp_hdr *)(uh + 1);
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		esph = (struct ip_esp_hdr *)(udpdata32 + 2);
		break;
	}

	*skb_mac_header(skb) = IPPROTO_UDP;
	esp->esph = esph;

	return 0;
}

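/* Make room for the ESP trailer. Fast paths: use existing tailroom, or
 * append the trailer in a fresh page fragment so the head need not be
 * copied; otherwise fall back to skb_cow_data(). Returns the number of
 * scatterlist entries the caller must allocate, or a negative errno.
 */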
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_udp_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);

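/* Do the actual AEAD encryption: build the source (and, if the trailer
 * went into a separate page, destination) scatterlists, seed the IV from
 * the 64-bit sequence number and hand the request to the crypto layer.
 * Returns -EINPROGRESS when the cipher completes asynchronously.
 */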
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

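/* xfrm output entry point. The skb is pure payload at this point: compute
 * the TFC padding, pad length and trailer size, fill in SPI and sequence
 * number, then hand off to esp_output_head()/esp_output_tail().
 */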
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}

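/* Validate and strip the ESP trailer (padding, pad length, next header)
 * and the ICV after decryption. Returns the next-header value on success.
 */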
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

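/* Common post-decryption processing, shared with the offload code: free
 * the scratch buffer, strip the trailer, handle NAT-T port/address changes
 * and fix up the transport header. Returns the inner protocol number or a
 * negative errno.
 */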
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}

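/* Convert a link MTU into the largest payload that still fits once the
 * ESP header, padding and ICV are added: round the usable space down to
 * the cipher block size (the trailing -2 reserves the pad-length and
 * next-header bytes). For example, assuming transport mode with cbc(aes)
 * (16-byte IV, 16-byte blocks) and a 12-byte truncated ICV, header_len is
 * 8 + 16 = 24, so for mtu = 1500:
 * ((1500 - 24 - 12 - 20) & ~15) + 20 - 2 = 1458.
 */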
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

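/* ICMP error handler: on a fragmentation-needed or redirect message for a
 * known SA, update the cached PMTU or reroute; everything else is ignored.
 */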
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
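		/* fall through */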
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

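/* Build an authenc(hmac,cipher) AEAD from separate auth and encryption
 * algorithms. The key is passed to the transform as one blob: an rtattr
 * carrying crypto_authenc_key_param (the encryption key length), then the
 * raw authentication key, then the encryption key.
 */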
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

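/* Initialize an ESP state: set up the AEAD transform and precompute the
 * header and trailer lengths for the negotiated mode and encapsulation.
 */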
static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp4_rcv_cb,
	.err_handler	= esp4_err,
	.priority	= 0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);