/*
 * L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <asm/ioctls.h>
15#include <linux/icmp.h>
16#include <linux/module.h>
17#include <linux/skbuff.h>
18#include <linux/random.h>
19#include <linux/socket.h>
20#include <linux/l2tp.h>
21#include <linux/in.h>
22#include <net/sock.h>
23#include <net/ip.h>
24#include <net/icmp.h>
25#include <net/udp.h>
26#include <net/inet_common.h>
27#include <net/tcp_states.h>
28#include <net/protocol.h>
29#include <net/xfrm.h>
30
31#include "l2tp_core.h"
32
/* Per-socket state for an L2TP/IP (PF_INET, IPPROTO_L2TP) socket. */
struct l2tp_ip_sock {
	/* inet_sock has to be the first member of l2tp_ip_sock */
	struct inet_sock inet;

	u32 conn_id;		/* local tunnel id, set in l2tp_ip_bind() */
	u32 peer_conn_id;	/* peer's tunnel id, set in l2tp_ip_connect() */
};
40
/* Global (not per-netns) socket tables; lookups filter on sock_net().
 * l2tp_ip_lock guards both lists below.
 */
static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;		/* sockets not yet bound */
static struct hlist_head l2tp_ip_bind_table;	/* bound/connected sockets */
44
/* Downcast a struct sock to its containing l2tp_ip_sock. Valid because
 * struct inet_sock (which embeds struct sock) is the first member.
 */
static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
	return (struct l2tp_ip_sock *)sk;
}
49
50static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
51 __be32 raddr, int dif, u32 tunnel_id)
52{
53 struct sock *sk;
54
55 sk_for_each_bound(sk, &l2tp_ip_bind_table) {
56 const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
57 const struct inet_sock *inet = inet_sk(sk);
58
59 if (!net_eq(sock_net(sk), net))
60 continue;
61
62 if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
63 continue;
64
65 if (inet->inet_rcv_saddr && laddr &&
66 inet->inet_rcv_saddr != laddr)
67 continue;
68
69 if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
70 continue;
71
72 if (l2tp->conn_id != tunnel_id)
73 continue;
74
75 goto found;
76 }
77
78 sk = NULL;
79found:
80 return sk;
81}
82
/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns                              Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip_recv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	u32 session_id;
	u32 tunnel_id;
	unsigned char *ptr, *optr;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel = NULL;
	struct iphdr *iph;
	int length;

	/* Need at least the 4-byte session id to classify the packet. */
	if (!pskb_may_pull(skb, 4))
		goto discard;

	/* Point to L2TP header */
	optr = ptr = skb->data;
	session_id = ntohl(*((__be32 *) ptr));
	ptr += 4;

	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
	 * the session_id. If it is 0, the packet is a L2TP control
	 * frame and the session_id value can be discarded.
	 */
	if (session_id == 0) {
		__skb_pull(skb, 4);
		goto pass_up;
	}

	/* Ok, this is a data packet. Lookup the session.
	 * NOTE(review): the final 'true' argument presumably takes a
	 * reference on the session (matched by l2tp_session_dec_refcount()
	 * on every exit path below) — confirm against l2tp_core.
	 */
	session = l2tp_session_get(net, NULL, session_id, true);
	if (!session)
		goto discard;

	tunnel = session->tunnel;
	if (!tunnel)
		goto discard_sess;

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto discard_sess;

		/* Point to L2TP header: re-read skb->data because
		 * pskb_may_pull() may have reallocated the head.
		 */
		optr = ptr = skb->data;
		ptr += 4;
		pr_debug("%s: ip recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
	}

	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
		goto discard_sess;

	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
	l2tp_session_dec_refcount(session);

	return 0;

pass_up:
	/* Get the tunnel_id from the L2TP header: need the 12-byte
	 * control header (flags/ver/length, conn id, Ns/Nr).
	 */
	if (!pskb_may_pull(skb, 12))
		goto discard;

	/* Control messages must have both T and L bits set (see header
	 * diagram above).
	 */
	if ((skb->data[0] & 0xc0) != 0xc0)
		goto discard;

	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
	iph = (struct iphdr *)skb_network_header(skb);

	read_lock_bh(&l2tp_ip_lock);
	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
				   tunnel_id);
	if (!sk) {
		read_unlock_bh(&l2tp_ip_lock);
		goto discard;
	}
	/* Pin the socket before dropping the lock so it cannot go away
	 * under us.
	 */
	sock_hold(sk);
	read_unlock_bh(&l2tp_ip_lock);

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_put;

	nf_reset(skb);

	/* sk_receive_skb() consumes both the skb and the sock reference. */
	return sk_receive_skb(sk, skb, 1);

discard_sess:
	if (session->deref)
		session->deref(session);
	l2tp_session_dec_refcount(session);
	goto discard;

discard_put:
	sock_put(sk);

discard:
	kfree_skb(skb);
	return 0;
}
216
217static int l2tp_ip_hash(struct sock *sk)
218{
219 if (sk_unhashed(sk)) {
220 write_lock_bh(&l2tp_ip_lock);
221 sk_add_node(sk, &l2tp_ip_table);
222 write_unlock_bh(&l2tp_ip_lock);
223 }
224 return 0;
225}
226
227static void l2tp_ip_unhash(struct sock *sk)
228{
229 if (sk_unhashed(sk))
230 return;
231 write_lock_bh(&l2tp_ip_lock);
232 sk_del_node_init(sk);
233 write_unlock_bh(&l2tp_ip_lock);
234}
235
236static int l2tp_ip_open(struct sock *sk)
237{
238 /* Prevent autobind. We don't have ports. */
239 inet_sk(sk)->inet_num = IPPROTO_L2TP;
240
241 l2tp_ip_hash(sk);
242 return 0;
243}
244
245static void l2tp_ip_close(struct sock *sk, long timeout)
246{
247 write_lock_bh(&l2tp_ip_lock);
248 hlist_del_init(&sk->sk_bind_node);
249 sk_del_node_init(sk);
250 write_unlock_bh(&l2tp_ip_lock);
251 sk_common_release(sk);
252}
253
/* Socket destroy callback: purge queued transmit data and tear down any
 * tunnel still attached to this socket.
 */
static void l2tp_ip_destroy_sock(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

	/* Free anything still sitting in the write queue. */
	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	if (tunnel) {
		l2tp_tunnel_closeall(tunnel);
		/* NOTE(review): l2tp_sock_to_tunnel() presumably took a
		 * socket reference when the tunnel is attached; this
		 * sock_put() drops it — confirm against l2tp_core.
		 */
		sock_put(sk);
	}

	sk_refcnt_debug_dec(sk);
}
269
/* Bind an L2TP/IP socket to a local address and tunnel (connection) id.
 *
 * SOCK_ZAPPED is used as a "not yet bound" marker: it must still be set
 * on entry (double bind is rejected) and is cleared on success, which is
 * what l2tp_ip_connect() later checks for.
 *
 * Returns 0 on success; -EINVAL on bad arguments or state,
 * -EADDRNOTAVAIL if the address is not local, -EADDRINUSE if another
 * socket is already bound to the same address/device/tunnel id.
 */
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
	struct net *net = sock_net(sk);
	int ret;
	int chk_addr_ret;

	if (addr_len < sizeof(struct sockaddr_l2tpip))
		return -EINVAL;
	if (addr->l2tp_family != AF_INET)
		return -EINVAL;

	lock_sock(sk);

	ret = -EINVAL;
	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if (sk->sk_state != TCP_CLOSE)
		goto out;

	/* A non-zero address must be local, multicast or broadcast. */
	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
	ret = -EADDRNOTAVAIL;
	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
		goto out;

	if (addr->l2tp_addr.s_addr)
		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	/* Reject the bind if a matching socket already exists; the check
	 * and the table update must happen under the same write lock.
	 */
	write_lock_bh(&l2tp_ip_lock);
	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
		write_unlock_bh(&l2tp_ip_lock);
		ret = -EADDRINUSE;
		goto out;
	}

	sk_dst_reset(sk);
	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

	/* Move the socket from the unbound table to the bind table. */
	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);

	ret = 0;
	sock_reset_flag(sk, SOCK_ZAPPED);

out:
	release_sock(sk);

	return ret;
}
326
/* Connect an already-bound L2TP/IP socket to a peer address and peer
 * tunnel (connection) id.
 *
 * Returns 0 on success, -EINVAL for short/multicast addresses or an
 * unbound (SOCK_ZAPPED) socket, or the error from
 * __ip4_datagram_connect().
 */
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
	int rc;

	if (addr_len < sizeof(*lsa))
		return -EINVAL;

	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
		return -EINVAL;

	lock_sock(sk);

	/* Must bind first - autobinding does not work */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rc = -EINVAL;
		goto out_sk;
	}

	/* Sets inet_daddr & co. and moves sk_state to TCP_ESTABLISHED. */
	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
	if (rc < 0)
		goto out_sk;

	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

	/* Re-insert into the bind table now that the peer address is set,
	 * so __l2tp_ip_bind_lookup() raddr matching sees this socket.
	 */
	write_lock_bh(&l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	write_unlock_bh(&l2tp_ip_lock);

out_sk:
	release_sock(sk);

	return rc;
}
362
363static int l2tp_ip_disconnect(struct sock *sk, int flags)
364{
365 if (sock_flag(sk, SOCK_ZAPPED))
366 return 0;
367
368 return __udp_disconnect(sk, flags);
369}
370
371static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
372 int *uaddr_len, int peer)
373{
374 struct sock *sk = sock->sk;
375 struct inet_sock *inet = inet_sk(sk);
376 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
377 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
378
379 memset(lsa, 0, sizeof(*lsa));
380 lsa->l2tp_family = AF_INET;
381 if (peer) {
382 if (!inet->inet_dport)
383 return -ENOTCONN;
384 lsa->l2tp_conn_id = lsk->peer_conn_id;
385 lsa->l2tp_addr.s_addr = inet->inet_daddr;
386 } else {
387 __be32 addr = inet->inet_rcv_saddr;
388 if (!addr)
389 addr = inet->inet_saddr;
390 lsa->l2tp_conn_id = lsk->conn_id;
391 lsa->l2tp_addr.s_addr = addr;
392 }
393 *uaddr_len = sizeof(*lsa);
394 return 0;
395}
396
397static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
398{
399 int rc;
400
401 /* Charge it to the socket, dropping if the queue is full. */
402 rc = sock_queue_rcv_skb(sk, skb);
403 if (rc < 0)
404 goto drop;
405
406 return 0;
407
408drop:
409 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
410 kfree_skb(skb);
411 return 0;
412}
413
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 *
 * Builds a frame consisting of a zero 32-bit session id (marking it as
 * a control message) followed by the user data, resolves a route and
 * queues the skb to IP for output.
 *
 * Returns the number of bytes sent (len) on success, or a negative
 * errno (-ENOTCONN, -EINVAL, -EAFNOSUPPORT, -EDESTADDRREQ, -ENOMEM,
 * -EHOSTUNREACH, ...).
 */
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int rc;
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = NULL;
	struct flowi4 *fl4;
	int connected = 0;
	__be32 daddr;

	lock_sock(sk);

	rc = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	/* Get and verify the address: explicit msg_name wins, otherwise
	 * fall back to the connected peer address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(*lip))
			goto out;

		/* AF_UNSPEC is tolerated for compatibility; any other
		 * non-AF_INET family is rejected.
		 */
		if (lip->l2tp_family != AF_INET) {
			rc = -EAFNOSUPPORT;
			if (lip->l2tp_family != AF_UNSPEC)
				goto out;
		}

		daddr = lip->l2tp_addr.s_addr;
	} else {
		rc = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		daddr = inet->inet_daddr;
		connected = 1;
	}

	/* Allocate a socket buffer: headroom for NET_SKB_PAD + IP header
	 * (plus 2 for alignment), then 4 bytes of session id and the
	 * payload.
	 */
	rc = -ENOMEM;
	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
			   4 + len, 0, GFP_KERNEL);
	if (!skb)
		goto error;

	/* Reserve space for headers, putting IP header on 4-byte boundary. */
	skb_reserve(skb, 2 + NET_SKB_PAD);
	skb_reset_network_header(skb);
	skb_reserve(skb, sizeof(struct iphdr));
	skb_reset_transport_header(skb);

	/* Insert 0 session_id */
	*((__be32 *) skb_put(skb, 4)) = 0;

	/* Copy user data into skb */
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc < 0) {
		kfree_skb(skb);
		goto error;
	}

	fl4 = &inet->cork.fl.u.ip4;
	/* Connected sockets may have a cached route; __sk_dst_check()
	 * returns NULL if it is stale.
	 */
	if (connected)
		rt = (struct rtable *) __sk_dst_check(sk, 0);

	rcu_read_lock();
	if (rt == NULL) {
		const struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference(inet->inet_opt);

		/* Use correct destination address if we have options. */
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport, inet->inet_sport,
					   sk->sk_protocol, RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		if (connected) {
			/* Cache the new route on the socket. */
			sk_setup_caps(sk, &rt->dst);
		} else {
			/* One-shot route: the skb owns the reference. */
			skb_dst_set(skb, &rt->dst);
			goto xmit;
		}
	}

	/* We dont need to clone dst here, it is guaranteed to not disappear.
	 * __dev_xmit_skb() might force a refcount if needed.
	 */
	skb_dst_set_noref(skb, &rt->dst);

xmit:
	/* Queue the packet to IP for output */
	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
	rcu_read_unlock();

error:
	/* Success: report the full payload length to the caller. */
	if (rc >= 0)
		rc = len;

out:
	release_sock(sk);
	return rc;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	rc = -EHOSTUNREACH;
	goto out;
}
537
538static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
539 size_t len, int noblock, int flags, int *addr_len)
540{
541 struct inet_sock *inet = inet_sk(sk);
542 size_t copied = 0;
543 int err = -EOPNOTSUPP;
544 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
545 struct sk_buff *skb;
546
547 if (flags & MSG_OOB)
548 goto out;
549
550 skb = skb_recv_datagram(sk, flags, noblock, &err);
551 if (!skb)
552 goto out;
553
554 copied = skb->len;
555 if (len < copied) {
556 msg->msg_flags |= MSG_TRUNC;
557 copied = len;
558 }
559
560 err = skb_copy_datagram_msg(skb, 0, msg, copied);
561 if (err)
562 goto done;
563
564 sock_recv_timestamp(msg, sk, skb);
565
566 /* Copy the address. */
567 if (sin) {
568 sin->sin_family = AF_INET;
569 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
570 sin->sin_port = 0;
571 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
572 *addr_len = sizeof(*sin);
573 }
574 if (inet->cmsg_flags)
575 ip_cmsg_recv(msg, skb);
576 if (flags & MSG_TRUNC)
577 copied = skb->len;
578done:
579 skb_free_datagram(sk, skb);
580out:
581 return err ? err : copied;
582}
583
584int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
585{
586 struct sk_buff *skb;
587 int amount;
588
589 switch (cmd) {
590 case SIOCOUTQ:
591 amount = sk_wmem_alloc_get(sk);
592 break;
593 case SIOCINQ:
594 spin_lock_bh(&sk->sk_receive_queue.lock);
595 skb = skb_peek(&sk->sk_receive_queue);
596 amount = skb ? skb->len : 0;
597 spin_unlock_bh(&sk->sk_receive_queue.lock);
598 break;
599
600 default:
601 return -ENOIOCTLCMD;
602 }
603
604 return put_user(amount, (int __user *)arg);
605}
606EXPORT_SYMBOL(l2tp_ioctl);
607
/* Protocol operations for SOCK_DGRAM/IPPROTO_L2TP sockets. IP-generic
 * handlers are reused where no L2TP-specific behaviour is needed.
 */
static struct proto l2tp_ip_prot = {
	.name		   = "L2TP/IP",
	.owner		   = THIS_MODULE,
	.init		   = l2tp_ip_open,
	.close		   = l2tp_ip_close,
	.bind		   = l2tp_ip_bind,
	.connect	   = l2tp_ip_connect,
	.disconnect	   = l2tp_ip_disconnect,
	.ioctl		   = l2tp_ioctl,
	.destroy	   = l2tp_ip_destroy_sock,
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.sendmsg	   = l2tp_ip_sendmsg,
	.recvmsg	   = l2tp_ip_recvmsg,
	.backlog_rcv	   = l2tp_ip_backlog_recv,
	.hash		   = l2tp_ip_hash,
	.unhash		   = l2tp_ip_unhash,
	.obj_size	   = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
631
/* Socket-layer operations; mostly generic inet datagram handlers, with
 * only getname overridden for the L2TP/IP address format.
 */
static const struct proto_ops l2tp_ip_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = l2tp_ip_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
656
/* Registration entry mapping SOCK_DGRAM/IPPROTO_L2TP to this protocol. */
static struct inet_protosw l2tp_ip_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_L2TP,
	.prot		= &l2tp_ip_prot,
	.ops		= &l2tp_ip_ops,
};
663
/* IPv4 protocol handler: delivers incoming IPPROTO_L2TP packets to
 * l2tp_ip_recv(). netns_ok allows use in all network namespaces.
 */
static struct net_protocol l2tp_ip_protocol __read_mostly = {
	.handler	= l2tp_ip_recv,
	.netns_ok	= 1,
};
668
669static int __init l2tp_ip_init(void)
670{
671 int err;
672
673 pr_info("L2TP IP encapsulation support (L2TPv3)\n");
674
675 err = proto_register(&l2tp_ip_prot, 1);
676 if (err != 0)
677 goto out;
678
679 err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
680 if (err)
681 goto out1;
682
683 inet_register_protosw(&l2tp_ip_protosw);
684 return 0;
685
686out1:
687 proto_unregister(&l2tp_ip_prot);
688out:
689 return err;
690}
691
/* Module exit: unregister everything in reverse order of l2tp_ip_init(). */
static void __exit l2tp_ip_exit(void)
{
	inet_unregister_protosw(&l2tp_ip_protosw);
	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	proto_unregister(&l2tp_ip_prot);
}
698
699module_init(l2tp_ip_init);
700module_exit(l2tp_ip_exit);
701
702MODULE_LICENSE("GPL");
703MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
704MODULE_DESCRIPTION("L2TP over IP");
705MODULE_VERSION("1.0");
706
/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
710MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
711MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);