/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
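/*
 * Note: FRAG6_CB() overlays this private per-fragment state on the
 * 48-byte skb->cb[] scratch area; struct inet6_skb_parm must remain
 * the first member so that IP6CB() keeps working on the same skb.
 */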


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside the
 * ipfrag_lock, as doing so could race with ipfrag_hash_rnd being
 * recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

	c = jhash_3words((__force u32)saddr->s6_addr32[0],
			 (__force u32)saddr->s6_addr32[1],
			 (__force u32)saddr->s6_addr32[2],
			 rnd);

	c = jhash_3words((__force u32)saddr->s6_addr32[3],
			 (__force u32)daddr->s6_addr32[0],
			 (__force u32)daddr->s6_addr32[1],
			 c);

	c = jhash_3words((__force u32)daddr->s6_addr32[2],
			 (__force u32)daddr->s6_addr32[3],
			 (__force u32)id,
			 c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);
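/*
 * Note: the three jhash_3words() rounds above fold the full 128-bit
 * source and destination addresses plus the 32-bit fragment id into a
 * single word, seeded with the random value ip6_frags.rnd (re-keyed
 * every secret_interval) so that off-path attackers cannot precompute
 * hash-bucket collisions. The final mask assumes INETFRAGS_HASHSZ is
 * a power of two.
 */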

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/*
	 * But use as source the device on which the LAST ARRIVED
	 * segment was received. And do not use the fq->dev pointer
	 * directly; the device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
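/*
 * Note: this runs as the queue's timer handler, and the timer holds
 * its own reference on the queue; the fq_put() above releases that
 * reference, so actual destruction happens only once all other
 * holders (e.g. a concurrent ipv6_frag_rcv()) have dropped theirs.
 */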

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct frag_queue, q);
}
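/*
 * Note: inet_frag_find() drops ip6_frags.lock itself and, on success,
 * returns the queue (found or freshly created) with an elevated
 * reference count; every non-NULL result from fq_find() must
 * therefore be paired with fq_put(), as ipv6_frag_rcv() below does.
 */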

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
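	/*
	 * The upper 13 bits of frag_off are the offset in 8-byte units,
	 * so masking off the low three bits of the host-order value
	 * yields the byte offset directly. Worked example (a sketch,
	 * assuming the fragment header directly follows the IPv6
	 * header): with payload_len = 1456, the fragment header itself
	 * accounts for 8 of those bytes, so end = offset + 1448.
	 */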

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check that the fragment length is a multiple of
		 * 8 bytes, as required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID 3089:
	 * When reassembling an IPv6 datagram, if
	 * one or more of its constituent fragments is determined to be
	 * an overlapping fragment, the entire datagram (and any
	 * constituent fragments) MUST be silently discarded.
	 */

	/* Check for overlap with the preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with the succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;
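	/*
	 * Worked example (hypothetical numbers): if prev carries bytes
	 * [0, 1232) and this fragment claims offset 1224, then
	 * 0 + 1232 > 1224, so the whole queue is torn down per
	 * RFC 5722 rather than trusting either copy of the overlap.
	 */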

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = ip6_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	skb_dst_drop(skb);

	inet_frag_lru_move(&fq->q);	/* cf. CVE-2014-0100 */
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
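/*
 * Note on return values: ip6_frag_queue() returns -1 both on error and
 * when the datagram is simply not complete yet (the skb has been
 * consumed either way); once the first and last fragments are in and
 * meat == len, it returns whatever ip6_frag_reasm() returns.
 */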

/*
 *	Check if this packet is complete.
 *	Returns -1 on failure, and 1 if the frame was successfully
 *	reassembled; the offset of the current nexthdr field in the
 *	reassembled frame is stored in IP6CB(head)->nhoff.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}
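	/*
	 * After the split above, the zero-byte clone owns head's old
	 * frag_list (everything beyond head's linear data and page
	 * frags) and is linked in right behind head; head's own
	 * frag_list slot is now free to take the chain of remaining
	 * fragments built below.
	 */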

	/* We have to remove the fragment header from the datagram and
	 * relocate the remaining headers in order to calculate the ICV
	 * correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);
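	/*
	 * head->data already points just past the fragment header (see
	 * the pskb_pull() in ip6_frag_queue()), so the byte copy above
	 * writes the fragment header's nexthdr value back into the
	 * preceding header, and the memmove() slides everything in
	 * front of the fragment header 8 bytes forward, overwriting
	 * it; the MAC/network header offsets are bumped to match.
	 */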

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}
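	/*
	 * In host order, 0xFFF8 covers the 13-bit fragment offset and
	 * 0x0001 is the M (more fragments) flag, so the 0xFFF9 test
	 * above matches "atomic" fragments: offset 0 with no more
	 * fragments to come. Those are passed through with only the
	 * fragment header skipped; no reassembly state is needed.
	 */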

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
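/*
 * These per-netns knobs surface under /proc/sys/net/ipv6/, e.g.
 *
 *	sysctl -w net.ipv6.ip6frag_time=30
 *
 * ip6frag_time is expressed in seconds and converted to jiffies by
 * proc_dointvec_jiffies; the two thresholds are byte counts compared
 * against net->ipv6.frags.mem in ipv6_frag_rcv() above.
 */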

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};
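/*
 * Note: registering these pernet_operations makes ipv6_frags_init_net()
 * run for every network namespace as it is created (and once for
 * init_net at boot), so each namespace gets its own thresholds,
 * timeout, and fragment memory accounting.
 */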

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}
785}