/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>

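/*
 * Timer callback: pick a fresh hash seed and move every queue to the
 * bucket the new seed maps it to, then re-arm the timer.  Periodic
 * rehashing keeps remote senders from being able to target a single
 * hash chain indefinitely.
 */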
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

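/*
 * One-time initialisation of the per-protocol descriptor: empty hash
 * buckets, the hash rwlock, an initial hash seed and the periodic
 * secret-rebuild timer.
 */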
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

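/*
 * Per-namespace initialisation: reset the queue count and memory
 * accounting and set up the LRU list with its dedicated lock.
 */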
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);	/* hub: CVE-2014-0100 */
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

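/*
 * Namespace teardown: drop the low threshold to zero so the evictor
 * below reclaims every remaining queue for this namespace.
 */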
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);

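/* Unhash a queue and remove it from the per-namespace LRU list. */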
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	fq->net->nqueues--;
	write_unlock(&f->lock);

	inet_frag_lru_del(fq);	/* hub: CVE-2014-0100 */
}

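/*
 * Mark a queue as complete (dead): stop its expiry timer, unlink it from
 * the hash and LRU, and drop the references held by the timer and the
 * hash table.
 */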
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

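/*
 * Free one fragment skb and subtract its truesize from the accounted
 * memory and, if given, from the caller's work budget.
 */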
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

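/*
 * Final teardown of a dead (INET_FRAG_COMPLETE) queue: free every queued
 * fragment, return the queue's memory to the namespace accounting and run
 * the protocol-specific destructor before freeing the queue itself.
 */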
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
			int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

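/*
 * Evict least-recently-used queues until the namespace's fragment memory
 * drops below nf->low_thresh.  The LRU list is walked under lru_lock and
 * each queue is killed under its own lock.  Returns the number of queues
 * evicted.
 */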
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		spin_lock(&nf->lru_lock);	/* hub: CVE-2014-0100 */
		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);	/* hub: CVE-2014-0100 */
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		spin_unlock(&nf->lru_lock);	/* hub: CVE-2014-0100 */

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

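/*
 * Insert a freshly allocated queue into the hash table.  The hash is
 * recomputed under the write lock because the seed may have changed, and
 * on SMP the chain is rechecked in case another CPU inserted an equal
 * queue first; if so, that queue is returned and @qp_in is dropped.
 */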
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we stayed without the lock, another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash table, because an equal entry
	 * could have been created on another cpu while we promoted the
	 * read lock to a write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	nf->nqueues++;
	write_unlock(&f->lock);
	inet_frag_lru_add(nf, qp);	/* hub: CVE-2014-0100 */
	return qp;
}

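/*
 * Allocate a new queue, run the protocol constructor on it and charge its
 * size to the namespace's fragment memory.  Returns NULL if the atomic
 * allocation fails.
 */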
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	q->net = nf;

	return q;
}

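/* Allocate a new queue and insert it into the hash table in one step. */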
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

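/*
 * Look up the queue matching @key in the given hash bucket, taking a
 * reference on it, or create a new one if none exists.  Called with
 * f->lock read-held and always returns with the lock released.  If the
 * bucket chain has grown past INETFRAGS_MAXDEPTH, no queue is created
 * and ERR_PTR(-ENOBUFS) is returned instead.
 */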
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;
	int depth = 0;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

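/*
 * Rate-limited warning for the ERR_PTR(-ENOBUFS) case returned by
 * inet_frag_find() when a hash chain grows past INETFRAGS_MAXDEPTH.
 */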
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);