blob: 340faaffd59b0fa112fcfff6d25bd54014edbee9 [file] [log] [blame]
lh9ed821d2023-04-07 01:36:19 -07001/*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/module.h>
16#include <linux/random.h>
17#include <linux/slab.h>
18#include <linux/xfrm.h>
19
20#include <net/addrconf.h>
21#include <net/inet_common.h>
22#include <net/inet_hashtables.h>
23#include <net/inet_sock.h>
24#include <net/inet6_connection_sock.h>
25#include <net/inet6_hashtables.h>
26#include <net/ip6_route.h>
27#include <net/ipv6.h>
28#include <net/protocol.h>
29#include <net/transp_v6.h>
30#include <net/ip6_checksum.h>
31#include <net/xfrm.h>
32#include <net/secure_seq.h>
33
34#include "dccp.h"
35#include "ipv6.h"
36#include "feat.h"
37
38/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
39
40static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
41static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
42
43static void dccp_v6_hash(struct sock *sk)
44{
45 if (sk->sk_state != DCCP_CLOSED) {
46 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
47 inet_hash(sk);
48 return;
49 }
50 local_bh_disable();
51 __inet6_hash(sk, NULL);
52 local_bh_enable();
53 }
54}
55
/*
 * Add the IPv6 pseudo-header to the partial DCCP checksum accumulated
 * in skb->csum and return the folded 16-bit result (0 == valid when
 * verifying a received packet).
 */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  const struct in6_addr *saddr,
					  const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}
63
64static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
65{
66 struct ipv6_pinfo *np = inet6_sk(sk);
67 struct dccp_hdr *dh = dccp_hdr(skb);
68
69 dccp_csum_outgoing(skb);
70 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
71}
72
73static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
74{
75 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
76 ipv6_hdr(skb)->saddr.s6_addr32,
77 dccp_hdr(skb)->dccph_dport,
78 dccp_hdr(skb)->dccph_sport );
79
80}
81
/*
 * ICMPv6 error handler for DCCP, called from the IPv6 ICMP layer when
 * an error arrives quoting one of our previously-sent DCCP packets.
 * @offset points at the embedded DCCP header inside skb->data;
 * @type/@code/@info come from the ICMPv6 message.  Runs in softirq
 * context with the socket looked up and bh-locked below.
 */
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	/* The quoted packet must contain at least a basic DCCP header. */
	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	/* Addresses are swapped: the quoted packet was sent by us, so its
	 * destination is our peer and its source is our local address. */
	sk = inet6_lookup(net, &dccp_hashinfo,
			&hdr->daddr, dh->dccph_dport,
			&hdr->saddr, dh->dccph_sport, inet6_iif(skb));

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* Socket owned by user space: just count the event; the PMTU
	 * update below is skipped in that case. */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(dh);
	/* Discard errors quoting a sequence number outside our current
	 * acknowledgement window (window not yet valid while still in
	 * REQUESTING/LISTEN). */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		/* Path MTU discovery: refresh the route and shrink our MSS
		 * if the cached PMTU is now too large. */
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_DCCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (!between48(seq, dccp_rsk(req)->dreq_iss,
				    dccp_rsk(req)->dreq_gss)) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/* Abort the half-open connection this error refers to. */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		/* Soft error: only visible if the application asks. */
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
232
233
/*
 * Build and transmit a DCCP-Response answering the connection request
 * @req.  Returns a net_xmit_*() result, or a negative errno if the
 * route lookup failed.  The listener's IPv6 tx options are read under
 * RCU (fix for CVE-2016-3841); the pre-fix unlocked np->opt accesses
 * are retained below as commented-out markers.
 */
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
				 struct request_values *rv_unused)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	//CVE-2016-3841struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	/* Describe the return flow towards the requesting peer. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq6->rmt_addr;
	fl6.saddr = ireq6->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq6->iif;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	//CVE-2016-3841opt = np->opt;

	//CVE-2016-3841final_p = fl6_update_dst(&fl6, opt, &final);
	/* A routing-header option may override the final destination;
	 * &final is a copy, so the RCU section can end immediately. */
	rcu_read_lock();
	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
	rcu_read_unlock();
	//CVE-2016-3841

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		/* Checksum covers the request's address pair. */
		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq6->loc_addr,
							 &ireq6->rmt_addr);
		fl6.daddr = ireq6->rmt_addr;
		//CVE-2016-3841err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		/* Options must stay valid across the transmit. */
		rcu_read_lock();
		err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		//CVE-2016-3841
		err = net_xmit_eval(err);
	}

done:
	//CVE-2016-3841if (opt != NULL && opt != np->opt)
	//CVE-2016-3841sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
294
295static void dccp_v6_reqsk_destructor(struct request_sock *req)
296{
297 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
298 if (inet6_rsk(req)->pktopts != NULL)
299 kfree_skb(inet6_rsk(req)->pktopts);
300}
301
/*
 * Send a DCCP-Reset in reply to @rxskb when we have no usable socket
 * for it, using the per-net control socket.  Never answers a Reset
 * with a Reset and only replies to unicast destinations, to avoid
 * reset storms and amplification.
 */
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	/* Checksum covers the received packet's address pair, reversed. */
	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	/* Route the reply back to the sender. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}
347
/* Operations on half-open (request) sockets for DCCPv6. */
static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
};
356
/*
 * Map a packet received on a LISTEN socket to the socket that should
 * process it: a pending request_sock (handed to dccp_check_req()), an
 * already-established child (returned bh-locked), or the listener
 * itself.  Returns NULL when the match is in TIME_WAIT, meaning the
 * packet must be dropped.
 */
static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			/* Return the established child locked; the caller
			 * (dccp_v6_do_rcv) unlocks via dccp_child_process. */
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}
387
/*
 * Handle a DCCP-Request arriving on a listening socket: validate it,
 * allocate and initialise a request_sock, send a DCCP-Response and
 * queue the request awaiting the Ack.  Returns 0 when the packet has
 * been consumed or silently discarded, -1 on failure (the caller may
 * then generate a Reset using dccpd_reset_code set below).
 */
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	/* v4-mapped traffic is handled entirely by the IPv4 code. */
	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq6 = inet6_rsk(req);
	ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
	ireq6->loc_addr = ipv6_hdr(skb)->daddr;

	/* Keep a reference on the skb if the application asked to receive
	 * IPv6 packet options (delivered later via IPV6_PKTOPTIONS). */
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		/* NOTE(review): track_add() appears to be vendor skb
		 * refcount instrumentation, not upstream — confirm. */
		track_add(skb, 0, USER_INFO, 0);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr = dcb->dccpd_seq;
	dreq->dreq_gsr = dreq->dreq_isr;
	dreq->dreq_iss = dccp_v6_init_sequence(skb);
	dreq->dreq_gss = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req, NULL))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}
474
/*
 * Create the child socket for a completed handshake (the DCCP analogue
 * of tcp_v6_syn_recv_sock).  Handles both v4-mapped requests (delegated
 * to the IPv4 code, then patched up for the INET6 API) and native IPv6
 * requests.  Returns the new socket or NULL on failure.  The listener's
 * IPv6 tx options are cloned under RCU (CVE-2016-3841 fix); the pre-fix
 * code is retained as commented-out markers.
 */
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions = NULL;
		/* CVE-2017-9077 fix: the memcpy() above copied the
		 * listener's list heads; clear them so the child does not
		 * share (and later double-free) them. */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	//CVE-2016-3841opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		/* Caller supplied no route: build one from the request. */
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		fl6.daddr = ireq6->rmt_addr;
		//CVE-2016-3841final_p = fl6_update_dst(&fl6, opt, &final);
		rcu_read_lock();
		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
		rcu_read_unlock();//CVE-2016-3841
		fl6.saddr = ireq6->loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = inet_rsk(req)->rmt_port;
		fl6.fl6_sport = inet_rsk(req)->loc_port;
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = ireq6->rmt_addr;
	newnp->saddr = ireq6->loc_addr;
	newnp->rcv_saddr = ireq6->loc_addr;
	newsk->sk_bound_dev_if = ireq6->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* CVE-2017-9077 fix: clear list heads copied from the listener. */
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;
	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping reference count would be much more clever, but we make
	 * one more one thing there: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);//CVE-2016-3841
	if (opt != NULL) {/*//CVE-2016-3841
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);*/
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
		//CVE-2016-3841
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	/*//CVE-2016-3841
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);*/
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;
	//CVE-2016-3841
	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	/*//CVE-2016-3841
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);*/
	return NULL;
}
662
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Per-socket receive path: dispatches the packet according to the
 * socket state (fast path for OPEN, new-connection handling for
 * LISTEN, generic state machine otherwise).  Always returns 0; the
 * skb is consumed on every path.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		/*
		 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
		 * (wrt ipv6_pktopions) and net/ipv6/tcp_ipv6.c for an example.
		 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}
784
/*
 * Main receive entry point, called from the IPv6 protocol demux.
 * Validates the header and checksum, looks up the owning socket and
 * hands the skb to it (directly or via the backlog).  Returns 0, or
 * -1 when sk_receive_skb() reports the packet was dropped.
 */
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	/* Cache sequence/ack numbers in the skb control block. */
	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
				dh->dccph_sport, dh->dccph_dport);
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}
880
/*
 * Connect a DCCPv6 socket to the peer in @uaddr.  Handles flow-label
 * lookup, v4-mapped destinations (delegated to dccp_v4_connect()),
 * route lookup, source-address selection, local port hashing and the
 * secure initial sequence number.  Returns 0 or a negative errno.
 */
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;	/* CVE-2016-3841: fetched safely below */
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		/* A flow label may imply the destination address. */
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket to the v4-mapped operations. */
		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			/* Restore the native IPv6 operations on failure. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	//CVE-2016-3841final_p = fl6_update_dst(&fl6, np->opt, &final);
	/* The socket lock is held here, so a plain protected dereference
	 * of np->opt is sufficient (no RCU read section needed). */
	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);
	//CVE-2016-3841
	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	/*//CVE-2016-3841
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);*/
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
	//CVE-2016-3841
	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      np->daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
1047
/* Address-family specific connection-socket operations for native
 * DCCPv6 sockets. */
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	= inet6_csk_xmit,
	.send_check	= dccp_v6_send_check,
	.rebuild_header	= inet6_sk_rebuild_header,
	.conn_request	= dccp_v6_conn_request,
	.syn_recv_sock	= dccp_v6_request_recv_sock,
	.net_header_len	= sizeof(struct ipv6hdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
	.bind_conflict	= inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1065
/*
 *	DCCP over IPv4 via INET6 API: used after a v4-mapped connect();
 *	note the IPv4 queue_xmit/send_check/rebuild_header callbacks.
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= dccp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= dccp_v6_conn_request,
	.syn_recv_sock	= dccp_v6_request_recv_sock,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1085
1086/* NOTE: A lot of things set to zero explicitly by call to
1087 * sk_alloc() so need not be done here.
1088 */
1089static int dccp_v6_init_sock(struct sock *sk)
1090{
1091 static __u8 dccp_v6_ctl_sock_initialized;
1092 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1093
1094 if (err == 0) {
1095 if (unlikely(!dccp_v6_ctl_sock_initialized))
1096 dccp_v6_ctl_sock_initialized = 1;
1097 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1098 }
1099
1100 return err;
1101}
1102
/* Tear down a DCCPv6 socket: protocol-level state first, then the
 * generic INET6 state. */
static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1108
/* Sizing information for DCCPv6 TIME_WAIT mini-sockets. */
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};
1112
/*
 * Transport protocol definition for DCCPv6.  Most operations are shared
 * with DCCPv4 (the generic dccp_* handlers); only connect, init,
 * backlog_rcv, hash and destroy are v6-specific.
 */
static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
1144
/*
 * IPv6 stack hook for protocol number IPPROTO_DCCP: packet handler plus
 * ICMPv6 error handler.  Registering this makes the receive path live.
 */
static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
1150
/*
 * BSD-socket-layer operations for AF_INET6/SOCK_DCCP sockets.  Mostly the
 * generic inet6/sock_common handlers, with DCCP-specific poll and listen.
 */
static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
1175
/* socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP) creation entry in the inet6
 * protocol switch table.
 */
static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};
1183
1184static int __net_init dccp_v6_init_net(struct net *net)
1185{
1186 if (dccp_hashinfo.bhash == NULL)
1187 return -ESOCKTNOSUPPORT;
1188
1189 return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1190 SOCK_DCCP, IPPROTO_DCCP, net);
1191}
1192
/* Per-namespace teardown: destroy this namespace's control socket. */
static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}
1197
/* Pernet hooks: run dccp_v6_init_net/exit_net for every network namespace. */
static struct pernet_operations dccp_v6_ops = {
	.init   = dccp_v6_init_net,
	.exit   = dccp_v6_exit_net,
};
1202
1203static int __init dccp_v6_init(void)
1204{
1205 int err = proto_register(&dccp_v6_prot, 1);
1206
1207 if (err != 0)
1208 goto out;
1209
1210 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1211 if (err != 0)
1212 goto out_unregister_proto;
1213
1214 inet6_register_protosw(&dccp_v6_protosw);
1215
1216 err = register_pernet_subsys(&dccp_v6_ops);
1217 if (err != 0)
1218 goto out_destroy_ctl_sock;
1219out:
1220 return err;
1221
1222out_destroy_ctl_sock:
1223 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1224 inet6_unregister_protosw(&dccp_v6_protosw);
1225out_unregister_proto:
1226 proto_unregister(&dccp_v6_prot);
1227 goto out;
1228}
1229
1230static void __exit dccp_v6_exit(void)
1231{
1232 unregister_pernet_subsys(&dccp_v6_ops);
1233 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1234 inet6_unregister_protosw(&dccp_v6_protosw);
1235 proto_unregister(&dccp_v6_prot);
1236}
1237
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly.  Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP, so module autoloading works
 * for both request forms.
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");