// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "../core/sock_destructor.h"

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
	int			ip_defrag_offset;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
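
/* Illustrative example (not part of the original source; offsets and
 * lengths are made up): suppose fragments with (offset, len) = (0, 8),
 * (8, 8), (16, 8) arrive in order, followed by (32, 8).  The queue then
 * holds two runs:
 *
 *	run A: head skb (offset 0, frag_run_len 24) -> skb(8) -> skb(16)
 *	run B: head skb (offset 32, frag_run_len 8)
 *
 * Only the run heads (offsets 0 and 32) sit in q->rb_fragments; the other
 * skbs are chained via FRAG_CB()->next_frag.  q->last_run_head points at
 * run B's head and q->fragments_tail at the last skb appended.
 */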

static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements.
 * Value: 0xff if the frame should be dropped;
 *        0 or INET_ECN_CE value, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
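
/* Usage sketch (simplified and illustrative only; the real IPv4/IPv6
 * reassembly paths differ in detail): callers OR the IPFRAG_ECN_* value
 * of every fragment into their per-protocol queue (here called qp->ecn,
 * an assumed field name), then consult the table once at reassembly time:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *
 *	if (ecn == 0xff)
 *		...drop the whole datagram, invalid ECN combination...
 *	else
 *		iph->tos |= ecn;	// 0 or INET_ECN_CE
 */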

int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	refcount_set(&f->refcnt, 1);
	init_completion(&f->completion);
	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	wait_for_completion(&f->completion);

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

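	/* Drop the references that will no longer be released elsewhere:
	 * the timer's reference (if the timer was still pending) and the
	 * hash table's reference (if inet_frag_kill() has not already
	 * dropped it, or could not because the table was already dying).
	 */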
	count = del_timer_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;
	}
	spin_unlock_bh(&fq->lock);

	if (refcount_sub_and_test(count, &fq->refcnt))
		inet_frag_destroy(fq);
}

static void fqdir_work_fn(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
	struct inet_frags *f = fqdir->f;

	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
	 * have completed, since they need to dereference fqdir.
	 * Would it not be nice to have kfree_rcu_barrier() ? :)
	 */
	rcu_barrier();

	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	kfree(fqdir);
}

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
	struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
	int res;

	if (!fqdir)
		return -ENOMEM;
	fqdir->f = f;
	fqdir->net = net;
	res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
	if (res < 0) {
		kfree(fqdir);
		return res;
	}
	refcount_inc(&f->refcnt);
	*fqdirp = fqdir;
	return 0;
}
EXPORT_SYMBOL(fqdir_init);

void fqdir_exit(struct fqdir *fqdir)
{
	INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
	queue_work(system_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct fqdir *fqdir = fq->fqdir;

		fq->flags |= INET_FRAG_COMPLETE;
		rcu_read_lock();
		/* The RCU read lock provides a memory barrier
		 * guaranteeing that if fqdir->dead is false then
		 * the hash table destruction will not start until
		 * after we unlock. Paired with fqdir_pre_exit().
		 */
		if (!READ_ONCE(fqdir->dead)) {
			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
					       fqdir->f->rhash_params);
			refcount_dec(&fq->refcnt);
		} else {
			fq->flags |= INET_FRAG_HASH_DEAD;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->fqdir->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct fqdir *fqdir;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fqdir = q->fqdir;
	f = fqdir->f;
	sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->fqdir = fqdir;
	f->constructor(q, arg);
	add_frag_mem_limit(fqdir, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
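	/* Three references: one for the hash table, one for the timer,
	 * and one for the caller that inet_frag_find() hands the queue to.
	 */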
	refcount_set(&q->refcnt, 3);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = fqdir->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(fqdir, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + fqdir->timeout);

	*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
	/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
	long high_thresh = READ_ONCE(fqdir->high_thresh);
	struct inet_frag_queue *fq = NULL, *prev;

	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
		return NULL;

	rcu_read_lock();

	prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(fqdir, key, &prev);
	if (!IS_ERR_OR_NULL(prev)) {
		fq = prev;
		if (!refcount_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);
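
/* Usage sketch (simplified and illustrative only, not the exact IPv4/IPv6
 * callers): protocol code builds a lookup key, embeds struct
 * inet_frag_queue at the start of its own queue type, and drops the
 * returned reference with inet_frag_put() when done, e.g.:
 *
 *	q = inet_frag_find(net->ipv4.fqdir, &key);
 *	if (q) {
 *		struct ipq *qp = container_of(q, struct ipq, q);
 *		...
 *		inet_frag_put(&qp->q);
 *	}
 */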

int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID 3089:
	 * When reassembling an IPv6 datagram, if
	 * one or more of its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
				       FRAG_CB(curr)->frag_run_len;
			if (end <= FRAG_CB(curr)->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	FRAG_CB(skb)->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);
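
/* Typical caller pattern (illustrative sketch only; the real handling in
 * ip_fragment.c and the IPv6 reassembly paths differs in details such as
 * statistics and error codes):
 *
 *	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
 *	if (err == IPFRAG_DUP) {
 *		kfree_skb(skb);			// exact duplicate, keep queue
 *	} else if (err == IPFRAG_OVERLAP) {
 *		inet_frag_kill(&qp->q);		// RFC 5722: discard datagram
 *		kfree_skb(skb);
 *	}
 */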
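
/* Prepare the queue head for reassembly: make @skb the head if it is not
 * already (via skb_morph()), ensure the head is not cloned, and split off
 * a frag_list clone when the head itself carries a frag_list.  Returns the
 * location where inet_frag_reasm_finish() will link the remaining
 * fragments, or NULL on allocation failure.
 */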
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	void (*destructor)(struct sk_buff *);
	unsigned int orig_truesize = 0;
	struct sk_buff **nextp = NULL;
	struct sock *sk = skb->sk;
	int delta;

	if (sk && is_skb_wmem(skb)) {
		/* TX: skb->sk might have been passed as argument to
		 * dst->output and must remain valid until tx completes.
		 *
		 * Move sk to reassembled skb and fix up wmem accounting.
		 */
		orig_truesize = skb->truesize;
		destructor = skb->destructor;
	}

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp) {
			head = skb;
			goto out_restore_sk;
		}
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;

		if (orig_truesize) {
			/* prevent skb_morph from releasing sk */
			skb->sk = NULL;
			skb->destructor = NULL;
		}
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_restore_sk;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->fqdir, delta);
	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with the data and paged part
	 * and the second holding only the fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_restore_sk;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->fqdir, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

out_restore_sk:
	if (orig_truesize) {
		int ts_delta = head->truesize - orig_truesize;

		/* if this reassembled skb is fragmented later,
		 * fraglist skbs will get skb->sk assigned from head->sk,
		 * and each frag skb will be released via sock_wfree.
		 *
		 * Update sk_wmem_alloc.
		 */
		head->sk = sk;
		head->destructor = destructor;
		refcount_add(ts_delta, &sk->sk_wmem_alloc);
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

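/* Using @reasm_data (the location returned by inet_frag_reasm_prepare()),
 * link all remaining fragments into @head's frag_list, coalescing them
 * into @head when @try_coalesce allows it, while fixing up length,
 * checksum, truesize and the fqdir memory accounting.
 */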
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce)
{
	struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
	const unsigned int head_truesize = head->truesize;
	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;
	int sum_truesize;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);

	sum_truesize = head->truesize;
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
			bool stolen;
			int delta;

			sum_truesize += fp->truesize;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);

			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
							     &delta)) {
				kfree_skb_partial(fp, stolen);
			} else {
				fp->prev = NULL;
				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
				fp->sk = NULL;

				head->data_len += fp->len;
				head->len += fp->len;
				head->truesize += fp->truesize;

				*nextp = fp;
				nextp = &fp->next;
			}

			fp = next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->fqdir, sum_truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;

	if (sk)
		refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

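/* Detach and return the first fragment of the queue (the head of the
 * first run), promoting its next_frag, if any, into the rb-tree in its
 * place.  The caller owns the returned skb; its truesize has already been
 * subtracted from the fqdir memory accounting.
 */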
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head, *skb;

	head = skb_rb_first(&q->rb_fragments);
	if (!head)
		return NULL;
	skb = FRAG_CB(head)->next_frag;
	if (skb)
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
	else
		rb_erase(&head->rbnode, &q->rb_fragments);
	memset(&head->rbnode, 0, sizeof(head->rbnode));
	barrier();

	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->fqdir, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);