// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

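/* Hash a tunnel by its key and remote address into the per-netns table. */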
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
		       IP_TNL_HASH_BITS);
}

static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options.
 *
 * Tunnel hash table:
 * We require an exact key match, i.e. if a key is present in the packet
 * it will match only a tunnel with the same key; if no key is present,
 * it will match only a keyless tunnel.
 *
 * Keyless packets that do not match any configured keyless tunnel
 * will match the fallback tunnel.
 *
 * Given src, dst and key, find the appropriate tunnel for an incoming packet.
 */
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;
	struct net_device *ndev;
	unsigned int hash;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (cand)
		return cand;

	t = rcu_dereference(itn->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	ndev = READ_ONCE(itn->fb_tunnel_dev);
	if (ndev && ndev->flags & IFF_UP)
		return netdev_priv(ndev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

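/* Pick the hash bucket for a tunnel with these parameters. Multicast or
 * missing destinations hash with remote == 0, and keyless VTI tunnels
 * hash with i_key == 0, matching the lookup fallback order above.
 */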
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}

static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}
	return t;
}

static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	err = -E2BIG;
	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			goto failed;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3))
			goto failed;
		strcpy(name, ops->kind);
		strcat(name, "%d");
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

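/* Bind the tunnel to an underlying device by routing towards the remote
 * endpoint (or using parms.link), and derive needed_headroom and a
 * reasonable MTU from that device. Returns the suggested MTU.
 */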
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
				    iph->saddr, tunnel->parms.o_key,
				    RT_TOS(iph->tos), tunnel->parms.link,
				    tunnel->fwmark, 0);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		dst_cache_reset(&tunnel->dst_cache);
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = min(tdev->mtu, IP_MAX_MTU);
	}

	dev->needed_headroom = t_hlen + hlen;
	mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);

	if (mtu < IPV4_MIN_MTU)
		mtu = IPV4_MIN_MTU;

	return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;
	int t_hlen;
	int mtu;
	int err;

	dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	mtu = ip_tunnel_bind_dev(dev);
	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	nt = netdev_priv(dev);
	t_hlen = nt->hlen + sizeof(struct iphdr);
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->max_mtu -= dev->hard_header_len;

	ip_tunnel_add(itn, nt);
	return nt;

err_dev_set_mtu:
	unregister_netdevice(dev);
	return ERR_PTR(err);
}

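/* Receive path common to IPv4 tunnels: validate csum/seq flags against the
 * tunnel configuration, reset the network header to the inner packet,
 * decapsulate ECN, update stats and hand the skb to the GRO cells.
 */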
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int nh, err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	    ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	/* Save offset of outer header relative to skb->head,
	 * because we are going to reset the network header to the inner header
	 * and might change skb->head.
	 */
	nh = skb_network_header(skb) - skb->head;

	skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);

	if (!pskb_inet_may_pull(skb)) {
		DEV_STATS_INC(tunnel->dev, rx_length_errors);
		DEV_STATS_INC(tunnel->dev, rx_errors);
		goto drop;
	}
	iph = (struct iphdr *)(skb->head + nh);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip_tunnel_encap_ops **)
			&iptun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);

int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);

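/* Check the packet against the path MTU of the tunnel route and, if it does
 * not fit, send ICMP "fragmentation needed" / ICMPv6 "packet too big" back
 * towards the sender. Returns -E2BIG in that case, 0 otherwise.
 */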
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			   struct rtable *rt, __be16 df,
			   const struct iphdr *inner_iph,
			   int tunnel_hlen, __be32 dst, bool md)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size;
	int mtu;

	tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
	pkt_size = skb->len - tunnel_hlen;
	pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;

	if (df) {
		mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
		mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
	} else {
		mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
	}

	if (skb_valid_dst(skb))
		skb_dst_update_pmtu_no_confirm(skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6;
		__be32 daddr;

		rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
					   NULL;
		daddr = md ? dst : tunnel->parms.iph.daddr;

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
		    mtu >= IPV6_MIN_MTU) {
			if ((daddr && !ipv4_is_multicast(daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
		    mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}

static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
{
	/* we must cap headroom to some upper limit, else pskb_expand_head
	 * will overflow header offsets in skb_headers_offset_update().
	 */
	static const unsigned int max_allowed = 512;

	if (headroom > max_allowed)
		headroom = max_allowed;

	if (headroom > READ_ONCE(dev->needed_headroom))
		WRITE_ONCE(dev->needed_headroom, headroom);
}

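/* Transmit path for collect-metadata (flow-based) tunnels: the outer header
 * parameters are taken from the per-skb tunnel info rather than from the
 * device configuration.
 */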
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       u8 proto, int tunnel_hlen)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	u32 headroom = sizeof(struct iphdr);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	const struct iphdr *inner_iph;
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be16 df = 0;
	u8 tos, ttl;
	bool use_cache;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto tx_error;
	key = &tun_info->key;
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	tos = key->tos;
	if (tos == 1) {
		if (skb->protocol == htons(ETH_P_IP))
			tos = inner_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
	}
	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
			    0, skb->mark, skb_get_hash(skb));
	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
		goto tx_error;

	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
	}
	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
		df = htons(IP_DF);
	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
			    key->u.ipv4.dst, true)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = key->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
	if (skb_cow_head(skb, headroom)) {
		ip_rt_put(rt);
		goto tx_dropped;
	}

	ip_tunnel_adj_headroom(dev, headroom);

	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;
tx_error:
	dev->stats.tx_errors++;
	goto kfree;
tx_dropped:
	dev->stats.tx_dropped++;
kfree:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);

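/* Transmit path for classical, per-device configured tunnels. tnl_params
 * normally points at tunnel->parms.iph; a zero destination means an NBMA
 * tunnel whose endpoint is derived from the inner packet or tunnel info.
 */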
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info = NULL;
	const struct iphdr *inner_iph;
	unsigned int max_headroom;	/* The extra header space needed */
	struct rtable *rt = NULL;	/* Route to the other host */
	bool use_cache = false;
	struct flowi4 fl4;
	bool md = false;
	bool connected;
	u8 tos, ttl;
	__be32 dst;
	__be16 df;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		tun_info = skb_tunnel_info(skb);
		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
		    ip_tunnel_info_af(tun_info) == AF_INET &&
		    tun_info->key.u.ipv4.dst) {
			dst = tun_info->key.u.ipv4.dst;
			md = true;
			connected = true;
		}
		else if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		if (!md)
			connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
			    tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
			    tunnel->fwmark, skb_get_hash(skb));

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	if (connected && md) {
		use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
		if (use_cache)
			rt = dst_cache_get_ip4(&tun_info->dst_cache,
					       &fl4.saddr);
	} else {
		rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
						   &fl4.saddr) : NULL;
	}

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
		else if (!md && connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
		df |= (inner_iph->frag_off & htons(IP_DF));

	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);

	if (skb_cow_head(skb, max_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	ip_tunnel_adj_headroom(dev, max_headroom);

	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

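/* Apply new parameters to an existing tunnel: rehash it under the new
 * addresses/keys, refresh the link-layer addresses for non-Ethernet
 * devices and rebind to the underlying device if link or fwmark changed.
 */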
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu,
			     __u32 fwmark)
{
	ip_tunnel_del(itn, t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link || t->fwmark != fwmark) {
		int mtu;

		t->parms.link = p->link;
		t->fwmark = fwmark;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	dst_cache_reset(&t->dst_cache);
	netdev_state_change(dev);
}

int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->type);

		if (cmd == SIOCADDTUNNEL) {
			if (!t) {
				t = ip_tunnel_create(net, itn, p);
				err = PTR_ERR_OR_ZERO(t);
				break;
			}

			err = -EEXIST;
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true, 0);
		} else {
			err = -ENOENT;
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

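/* Change the device MTU, capping it so that the encapsulated packet still
 * fits in IP_MAX_MTU. In strict mode an out-of-range request fails instead
 * of being clamped.
 */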
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
	int max_mtu = IP_MAX_MTU - t_hlen;

	if (dev->type == ARPHRD_ETHER)
		max_mtu -= dev->hard_header_len;

	if (new_mtu < ETH_MIN_MTU)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	dst_cache_destroy(&tunnel->dst_cache);
	free_percpu(dev->tstats);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);

int ip_tunnel_get_iflink(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);

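/* Per-netns initialization: set up the hash table and, unless fallback
 * tunnels are disabled for this netns, create the fallback device.
 */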
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	itn->rtnl_link_ops = ops;
	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops || !net_has_fallback_tunnels(net)) {
		struct ip_tunnel_net *it_init_net;

		it_init_net = net_generic(&init_net, ip_tnl_net_id);
		itn->type = it_init_net->type;
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* The FB netdevice is special: there is one, and only one, per netns.
	 * Moving it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
		itn->type = itn->fb_tunnel_dev->type;
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
			      struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}

void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
			   struct rtnl_link_ops *ops)
{
	struct ip_tunnel_net *itn;
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		itn = net_generic(net, id);
		ip_tunnel_destroy(net, itn, &list, ops);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);

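/* Common rtnl newlink handler: register the device, pick an MTU within the
 * tunnel's limits and add it to the per-netns hash table.
 */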
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip_tunnel_find(itn, p, dev->type))
			return -EEXIST;
	}

	nt->net = net;
	nt->parms = *p;
	nt->fwmark = fwmark;
	err = register_netdevice(dev);
	if (err)
		goto err_register_netdevice;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (tb[IFLA_MTU]) {
		unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));

		if (dev->type == ARPHRD_ETHER)
			max -= dev->hard_header_len;

		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
	}

	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	ip_tunnel_add(itn, nt);
	return 0;

err_dev_set_mtu:
	unregister_netdevice(dev);
err_register_netdevice:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->needs_free_netdev = true;
	dev->priv_destructor = ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	if (tunnel->collect_md)
		netif_keep_dst(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	ip_tunnel_del(itn, netdev_priv(dev));
	if (itn->fb_tunnel_dev == dev)
		WRITE_ONCE(itn->fb_tunnel_dev, NULL);

	dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do least required initialization, rest of init is done in tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");