1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * ROUTE - implementation of the IP router.
7 *
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 *
14 * Fixes:
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
24 * clamper.
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
39 *
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
58 *
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
63 */
64
65#define pr_fmt(fmt) "IPv4: " fmt
66
67#include <linux/module.h>
68#include <linux/uaccess.h>
69#include <linux/bitops.h>
70#include <linux/types.h>
71#include <linux/kernel.h>
72#include <linux/mm.h>
73#include <linux/string.h>
74#include <linux/socket.h>
75#include <linux/sockios.h>
76#include <linux/errno.h>
77#include <linux/in.h>
78#include <linux/inet.h>
79#include <linux/netdevice.h>
80#include <linux/proc_fs.h>
81#include <linux/init.h>
82#include <linux/skbuff.h>
83#include <linux/inetdevice.h>
84#include <linux/igmp.h>
85#include <linux/pkt_sched.h>
86#include <linux/mroute.h>
87#include <linux/netfilter_ipv4.h>
88#include <linux/random.h>
89#include <linux/rcupdate.h>
90#include <linux/times.h>
91#include <linux/slab.h>
92#include <linux/jhash.h>
93#include <net/dst.h>
94#include <net/dst_metadata.h>
95#include <net/net_namespace.h>
96#include <net/protocol.h>
97#include <net/ip.h>
98#include <net/route.h>
99#include <net/inetpeer.h>
100#include <net/sock.h>
101#include <net/ip_fib.h>
102#include <net/arp.h>
103#include <net/tcp.h>
104#include <net/icmp.h>
105#include <net/xfrm.h>
106#include <net/lwtunnel.h>
107#include <net/netevent.h>
108#include <net/rtnetlink.h>
109#ifdef CONFIG_SYSCTL
110#include <linux/sysctl.h>
111#endif
112#include <net/secure_seq.h>
113#include <net/ip_tunnels.h>
114#include <net/l3mdev.h>
115
116#include "fib_lookup.h"
117
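/* Reduce a flow's TOS to the bits that matter for routing (IPTOS_RT_MASK)
 * plus the RTO_ONLINK hint.
 */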
118#define RT_FL_TOS(oldflp4) \
119 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
120
121#define RT_GC_TIMEOUT (300*HZ)
122
123static int ip_rt_max_size;
124static int ip_rt_redirect_number __read_mostly = 9;
125static int ip_rt_redirect_load __read_mostly = HZ / 50;
126static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
127static int ip_rt_error_cost __read_mostly = HZ;
128static int ip_rt_error_burst __read_mostly = 5 * HZ;
129static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
130static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
131static int ip_rt_min_advmss __read_mostly = 256;
132
133static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
134
135/*
136 * Interface to generic destination cache.
137 */
138
139static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
140static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
141static unsigned int ipv4_mtu(const struct dst_entry *dst);
142static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
143static void ipv4_link_failure(struct sk_buff *skb);
144static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
145 struct sk_buff *skb, u32 mtu,
146 bool confirm_neigh);
147static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
148 struct sk_buff *skb);
149static void ipv4_dst_destroy(struct dst_entry *dst);
150
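/* IPv4 routes never copy-on-write their dst metrics, so reaching this
 * callback indicates a bug.
 */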
151static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
152{
153 WARN_ON(1);
154 return NULL;
155}
156
157static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
158 struct sk_buff *skb,
159 const void *daddr);
160static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
161
162static struct dst_ops ipv4_dst_ops = {
163 .family = AF_INET,
164 .check = ipv4_dst_check,
165 .default_advmss = ipv4_default_advmss,
166 .mtu = ipv4_mtu,
167 .cow_metrics = ipv4_cow_metrics,
168 .destroy = ipv4_dst_destroy,
169 .negative_advice = ipv4_negative_advice,
170 .link_failure = ipv4_link_failure,
171 .update_pmtu = ip_rt_update_pmtu,
172 .redirect = ip_do_redirect,
173 .local_out = __ip_local_out,
174 .neigh_lookup = ipv4_neigh_lookup,
175 .confirm_neigh = ipv4_confirm_neigh,
176};
177
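/* Map the IPv4 TOS field to a packet-scheduler priority band
 * (consumed by rt_tos2priority()).
 */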
178#define ECN_OR_COST(class) TC_PRIO_##class
179
180const __u8 ip_tos2prio[16] = {
181 TC_PRIO_BESTEFFORT,
182 ECN_OR_COST(BESTEFFORT),
183 TC_PRIO_BESTEFFORT,
184 ECN_OR_COST(BESTEFFORT),
185 TC_PRIO_BULK,
186 ECN_OR_COST(BULK),
187 TC_PRIO_BULK,
188 ECN_OR_COST(BULK),
189 TC_PRIO_INTERACTIVE,
190 ECN_OR_COST(INTERACTIVE),
191 TC_PRIO_INTERACTIVE,
192 ECN_OR_COST(INTERACTIVE),
193 TC_PRIO_INTERACTIVE_BULK,
194 ECN_OR_COST(INTERACTIVE_BULK),
195 TC_PRIO_INTERACTIVE_BULK,
196 ECN_OR_COST(INTERACTIVE_BULK)
197};
198EXPORT_SYMBOL(ip_tos2prio);
199
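/* Per-CPU routing statistics, exported via /proc/net/stat/rt_cache. */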
200static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
201#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
202
203#ifdef CONFIG_PROC_FS
204static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
205{
206 if (*pos)
207 return NULL;
208 return SEQ_START_TOKEN;
209}
210
211static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
212{
213 ++*pos;
214 return NULL;
215}
216
217static void rt_cache_seq_stop(struct seq_file *seq, void *v)
218{
219}
220
221static int rt_cache_seq_show(struct seq_file *seq, void *v)
222{
223 if (v == SEQ_START_TOKEN)
224 seq_printf(seq, "%-127s\n",
225 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
226 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
227 "HHUptod\tSpecDst");
228 return 0;
229}
230
231static const struct seq_operations rt_cache_seq_ops = {
232 .start = rt_cache_seq_start,
233 .next = rt_cache_seq_next,
234 .stop = rt_cache_seq_stop,
235 .show = rt_cache_seq_show,
236};
237
238static int rt_cache_seq_open(struct inode *inode, struct file *file)
239{
240 return seq_open(file, &rt_cache_seq_ops);
241}
242
243static const struct file_operations rt_cache_seq_fops = {
244 .open = rt_cache_seq_open,
245 .read = seq_read,
246 .llseek = seq_lseek,
247 .release = seq_release,
248};
249
250
251static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
252{
253 int cpu;
254
255 if (*pos == 0)
256 return SEQ_START_TOKEN;
257
258 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
259 if (!cpu_possible(cpu))
260 continue;
261 *pos = cpu+1;
262 return &per_cpu(rt_cache_stat, cpu);
263 }
264 return NULL;
265}
266
267static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
268{
269 int cpu;
270
271 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
272 if (!cpu_possible(cpu))
273 continue;
274 *pos = cpu+1;
275 return &per_cpu(rt_cache_stat, cpu);
276 }
277 return NULL;
278
279}
280
281static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
282{
283
284}
285
286static int rt_cpu_seq_show(struct seq_file *seq, void *v)
287{
288 struct rt_cache_stat *st = v;
289
290 if (v == SEQ_START_TOKEN) {
291 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
292 return 0;
293 }
294
295 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
296 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
297 dst_entries_get_slow(&ipv4_dst_ops),
298 0, /* st->in_hit */
299 st->in_slow_tot,
300 st->in_slow_mc,
301 st->in_no_route,
302 st->in_brd,
303 st->in_martian_dst,
304 st->in_martian_src,
305
306 0, /* st->out_hit */
307 st->out_slow_tot,
308 st->out_slow_mc,
309
310 0, /* st->gc_total */
311 0, /* st->gc_ignored */
312 0, /* st->gc_goal_miss */
313 0, /* st->gc_dst_overflow */
314 0, /* st->in_hlist_search */
315 0 /* st->out_hlist_search */
316 );
317 return 0;
318}
319
320static const struct seq_operations rt_cpu_seq_ops = {
321 .start = rt_cpu_seq_start,
322 .next = rt_cpu_seq_next,
323 .stop = rt_cpu_seq_stop,
324 .show = rt_cpu_seq_show,
325};
326
327
328static int rt_cpu_seq_open(struct inode *inode, struct file *file)
329{
330 return seq_open(file, &rt_cpu_seq_ops);
331}
332
333static const struct file_operations rt_cpu_seq_fops = {
334 .open = rt_cpu_seq_open,
335 .read = seq_read,
336 .llseek = seq_lseek,
337 .release = seq_release,
338};
339
340#ifdef CONFIG_IP_ROUTE_CLASSID
341static int rt_acct_proc_show(struct seq_file *m, void *v)
342{
343 struct ip_rt_acct *dst, *src;
344 unsigned int i, j;
345
346 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
347 if (!dst)
348 return -ENOMEM;
349
350 for_each_possible_cpu(i) {
351 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
352 for (j = 0; j < 256; j++) {
353 dst[j].o_bytes += src[j].o_bytes;
354 dst[j].o_packets += src[j].o_packets;
355 dst[j].i_bytes += src[j].i_bytes;
356 dst[j].i_packets += src[j].i_packets;
357 }
358 }
359
360 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
361 kfree(dst);
362 return 0;
363}
364#endif
365
366static int __net_init ip_rt_do_proc_init(struct net *net)
367{
368 struct proc_dir_entry *pde;
369
370 pde = proc_create("rt_cache", 0444, net->proc_net,
371 &rt_cache_seq_fops);
372 if (!pde)
373 goto err1;
374
375 pde = proc_create("rt_cache", 0444,
376 net->proc_net_stat, &rt_cpu_seq_fops);
377 if (!pde)
378 goto err2;
379
380#ifdef CONFIG_IP_ROUTE_CLASSID
381 pde = proc_create_single("rt_acct", 0, net->proc_net,
382 rt_acct_proc_show);
383 if (!pde)
384 goto err3;
385#endif
386 return 0;
387
388#ifdef CONFIG_IP_ROUTE_CLASSID
389err3:
390 remove_proc_entry("rt_cache", net->proc_net_stat);
391#endif
392err2:
393 remove_proc_entry("rt_cache", net->proc_net);
394err1:
395 return -ENOMEM;
396}
397
398static void __net_exit ip_rt_do_proc_exit(struct net *net)
399{
400 remove_proc_entry("rt_cache", net->proc_net_stat);
401 remove_proc_entry("rt_cache", net->proc_net);
402#ifdef CONFIG_IP_ROUTE_CLASSID
403 remove_proc_entry("rt_acct", net->proc_net);
404#endif
405}
406
407static struct pernet_operations ip_rt_proc_ops __net_initdata = {
408 .init = ip_rt_do_proc_init,
409 .exit = ip_rt_do_proc_exit,
410};
411
412static int __init ip_rt_proc_init(void)
413{
414 if (IS_ENABLED(CONFIG_PROC_STRIPPED))
415 return 0;
416
417 return register_pernet_subsys(&ip_rt_proc_ops);
418}
419
420#else
421static inline int ip_rt_proc_init(void)
422{
423 return 0;
424}
425#endif /* CONFIG_PROC_FS */
426
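/* A cached route is stale once its generation id no longer matches the
 * per-namespace IPv4 route generation counter, i.e. the cache has been
 * flushed since the route was created.
 */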
427static inline bool rt_is_expired(const struct rtable *rth)
428{
429 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
430}
431
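/* Invalidate every cached route at once by bumping the namespace's IPv4
 * route generation id; rt_is_expired() then reports all existing entries
 * as stale.
 */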
432void rt_cache_flush(struct net *net)
433{
434 rt_genid_bump_ipv4(net);
435}
436
437static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
438 struct sk_buff *skb,
439 const void *daddr)
440{
441 struct net_device *dev = dst->dev;
442 const __be32 *pkey = daddr;
443 const struct rtable *rt;
444 struct neighbour *n;
445
446 rt = (const struct rtable *) dst;
447 if (rt->rt_gateway)
448 pkey = (const __be32 *) &rt->rt_gateway;
449 else if (skb)
450 pkey = &ip_hdr(skb)->daddr;
451
452 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
453 if (n)
454 return n;
455 return neigh_create(&arp_tbl, pkey, dev);
456}
457
458static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
459{
460 struct net_device *dev = dst->dev;
461 const __be32 *pkey = daddr;
462 const struct rtable *rt;
463
464 rt = (const struct rtable *)dst;
465 if (rt->rt_gateway)
466 pkey = (const __be32 *)&rt->rt_gateway;
467 else if (!daddr ||
468 (rt->rt_flags &
469 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
470 return;
471
472 __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
473}
474
475#define IP_IDENTS_SZ 2048u
476
477static atomic_t *ip_idents __read_mostly;
478static u32 *ip_tstamps __read_mostly;
479
480/* In order to protect privacy, we add a perturbation to identifiers
 481 * if one generator is seldom used. This makes it hard for an attacker
482 * to infer how many packets were sent between two points in time.
483 */
484u32 ip_idents_reserve(u32 hash, int segs)
485{
486 u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
487 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
488 u32 old = READ_ONCE(*p_tstamp);
489 u32 now = (u32)jiffies;
490 u32 new, delta = 0;
491
492 if (old != now && cmpxchg(p_tstamp, old, now) == old)
493 delta = prandom_u32_max(now - old);
494
495 /* Do not use atomic_add_return() as it makes UBSAN unhappy */
496 do {
497 old = (u32)atomic_read(p_id);
498 new = old + delta + segs;
499 } while (atomic_cmpxchg(p_id, old, new) != old);
500
501 return new - segs;
502}
503EXPORT_SYMBOL(ip_idents_reserve);
504
505void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
506{
507 u32 hash, id;
508
 509	/* Note: the lazy key initialization below may race, but that is harmless. */
510 if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
511 get_random_bytes(&net->ipv4.ip_id_key,
512 sizeof(net->ipv4.ip_id_key));
513
514 hash = siphash_3u32((__force u32)iph->daddr,
515 (__force u32)iph->saddr,
516 iph->protocol,
517 &net->ipv4.ip_id_key);
518 id = ip_idents_reserve(hash, segs);
519 iph->id = htons(id);
520}
521EXPORT_SYMBOL(__ip_select_ident);
522
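/* Fill @fl4 for an output route lookup from a packet's IP header; when a
 * socket is supplied, its bound device, mark, TOS and protocol take
 * precedence over the values derived from the packet.
 */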
523static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
524 const struct sock *sk,
525 const struct iphdr *iph,
526 int oif, u8 tos,
527 u8 prot, u32 mark, int flow_flags)
528{
529 if (sk) {
530 const struct inet_sock *inet = inet_sk(sk);
531
532 oif = sk->sk_bound_dev_if;
533 mark = sk->sk_mark;
534 tos = RT_CONN_FLAGS(sk);
535 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
536 }
537 flowi4_init_output(fl4, oif, mark, tos,
538 RT_SCOPE_UNIVERSE, prot,
539 flow_flags,
540 iph->daddr, iph->saddr, 0, 0,
541 sock_net_uid(net, sk));
542}
543
544static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
545 const struct sock *sk)
546{
547 const struct net *net = dev_net(skb->dev);
548 const struct iphdr *iph = ip_hdr(skb);
549 int oif = skb->dev->ifindex;
550 u8 tos = RT_TOS(iph->tos);
551 u8 prot = iph->protocol;
552 u32 mark = skb->mark;
553
554 __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
555}
556
557static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
558{
559 const struct inet_sock *inet = inet_sk(sk);
560 const struct ip_options_rcu *inet_opt;
561 __be32 daddr = inet->inet_daddr;
562
563 rcu_read_lock();
564 inet_opt = rcu_dereference(inet->inet_opt);
565 if (inet_opt && inet_opt->opt.srr)
566 daddr = inet_opt->opt.faddr;
567 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
568 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
569 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
570 inet_sk_flowi_flags(sk),
571 daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
572 rcu_read_unlock();
573}
574
575static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
576 const struct sk_buff *skb)
577{
578 if (skb)
579 build_skb_flow_key(fl4, skb, sk);
580 else
581 build_sk_flow_key(fl4, sk);
582}
583
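/* Serializes creation, update and removal of next-hop exception (fnhe)
 * entries and of the cached routes bound to them.
 */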
584static DEFINE_SPINLOCK(fnhe_lock);
585
586static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
587{
588 struct rtable *rt;
589
590 rt = rcu_dereference(fnhe->fnhe_rth_input);
591 if (rt) {
592 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
593 dst_dev_put(&rt->dst);
594 dst_release(&rt->dst);
595 }
596 rt = rcu_dereference(fnhe->fnhe_rth_output);
597 if (rt) {
598 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
599 dst_dev_put(&rt->dst);
600 dst_release(&rt->dst);
601 }
602}
603
604static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
605{
606 struct fib_nh_exception *fnhe, *oldest;
607
608 oldest = rcu_dereference(hash->chain);
609 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
610 fnhe = rcu_dereference(fnhe->fnhe_next)) {
611 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
612 oldest = fnhe;
613 }
614 fnhe_flush_routes(oldest);
615 return oldest;
616}
617
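/* Hash a destination address into one of the per-nexthop exception
 * buckets, using a lazily initialized random seed.
 */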
618static inline u32 fnhe_hashfun(__be32 daddr)
619{
620 static u32 fnhe_hashrnd __read_mostly;
621 u32 hval;
622
623 net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
624 hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
625 return hash_32(hval, FNHE_HASH_SHIFT);
626}
627
628static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
629{
630 rt->rt_pmtu = fnhe->fnhe_pmtu;
631 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
632 rt->dst.expires = fnhe->fnhe_expires;
633
634 if (fnhe->fnhe_gw) {
635 rt->rt_flags |= RTCF_REDIRECTED;
636 rt->rt_gateway = fnhe->fnhe_gw;
637 rt->rt_uses_gateway = 1;
638 }
639}
640
641static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
642 u32 pmtu, bool lock, unsigned long expires)
643{
644 struct fnhe_hash_bucket *hash;
645 struct fib_nh_exception *fnhe;
646 struct rtable *rt;
647 u32 genid, hval;
648 unsigned int i;
649 int depth;
650
651 genid = fnhe_genid(dev_net(nh->nh_dev));
652 hval = fnhe_hashfun(daddr);
653
654 spin_lock_bh(&fnhe_lock);
655
656 hash = rcu_dereference(nh->nh_exceptions);
657 if (!hash) {
658 hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
659 if (!hash)
660 goto out_unlock;
661 rcu_assign_pointer(nh->nh_exceptions, hash);
662 }
663
664 hash += hval;
665
666 depth = 0;
667 for (fnhe = rcu_dereference(hash->chain); fnhe;
668 fnhe = rcu_dereference(fnhe->fnhe_next)) {
669 if (fnhe->fnhe_daddr == daddr)
670 break;
671 depth++;
672 }
673
674 if (fnhe) {
675 if (fnhe->fnhe_genid != genid)
676 fnhe->fnhe_genid = genid;
677 if (gw)
678 fnhe->fnhe_gw = gw;
679 if (pmtu) {
680 fnhe->fnhe_pmtu = pmtu;
681 fnhe->fnhe_mtu_locked = lock;
682 }
683 fnhe->fnhe_expires = max(1UL, expires);
684 /* Update all cached dsts too */
685 rt = rcu_dereference(fnhe->fnhe_rth_input);
686 if (rt)
687 fill_route_from_fnhe(rt, fnhe);
688 rt = rcu_dereference(fnhe->fnhe_rth_output);
689 if (rt)
690 fill_route_from_fnhe(rt, fnhe);
691 } else {
692 if (depth > FNHE_RECLAIM_DEPTH)
693 fnhe = fnhe_oldest(hash);
694 else {
695 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
696 if (!fnhe)
697 goto out_unlock;
698
699 fnhe->fnhe_next = hash->chain;
700 rcu_assign_pointer(hash->chain, fnhe);
701 }
702 fnhe->fnhe_genid = genid;
703 fnhe->fnhe_daddr = daddr;
704 fnhe->fnhe_gw = gw;
705 fnhe->fnhe_pmtu = pmtu;
706 fnhe->fnhe_mtu_locked = lock;
707 fnhe->fnhe_expires = max(1UL, expires);
708
709 /* Exception created; mark the cached routes for the nexthop
710 * stale, so anyone caching it rechecks if this exception
711 * applies to them.
712 */
713 rt = rcu_dereference(nh->nh_rth_input);
714 if (rt)
715 rt->dst.obsolete = DST_OBSOLETE_KILL;
716
717 for_each_possible_cpu(i) {
718 struct rtable __rcu **prt;
719 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
720 rt = rcu_dereference(*prt);
721 if (rt)
722 rt->dst.obsolete = DST_OBSOLETE_KILL;
723 }
724 }
725
726 fnhe->fnhe_stamp = jiffies;
727
728out_unlock:
729 spin_unlock_bh(&fnhe_lock);
730}
731
732static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
733 bool kill_route)
734{
735 __be32 new_gw = icmp_hdr(skb)->un.gateway;
736 __be32 old_gw = ip_hdr(skb)->saddr;
737 struct net_device *dev = skb->dev;
738 struct in_device *in_dev;
739 struct fib_result res;
740 struct neighbour *n;
741 struct net *net;
742
743 switch (icmp_hdr(skb)->code & 7) {
744 case ICMP_REDIR_NET:
745 case ICMP_REDIR_NETTOS:
746 case ICMP_REDIR_HOST:
747 case ICMP_REDIR_HOSTTOS:
748 break;
749
750 default:
751 return;
752 }
753
754 if (rt->rt_gateway != old_gw)
755 return;
756
757 in_dev = __in_dev_get_rcu(dev);
758 if (!in_dev)
759 return;
760
761 net = dev_net(dev);
762 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
763 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
764 ipv4_is_zeronet(new_gw))
765 goto reject_redirect;
766
767 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
768 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
769 goto reject_redirect;
770 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
771 goto reject_redirect;
772 } else {
773 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
774 goto reject_redirect;
775 }
776
777 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
778 if (!n)
779 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
780 if (!IS_ERR(n)) {
781 if (!(n->nud_state & NUD_VALID)) {
782 neigh_event_send(n, NULL);
783 } else {
784 if (fib_lookup(net, fl4, &res, 0) == 0) {
785 struct fib_nh *nh = &FIB_RES_NH(res);
786
787 update_or_create_fnhe(nh, fl4->daddr, new_gw,
788 0, false,
789 jiffies + ip_rt_gc_timeout);
790 }
791 if (kill_route)
792 rt->dst.obsolete = DST_OBSOLETE_KILL;
793 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
794 }
795 neigh_release(n);
796 }
797 return;
798
799reject_redirect:
800#ifdef CONFIG_IP_ROUTE_VERBOSE
801 if (IN_DEV_LOG_MARTIANS(in_dev)) {
802 const struct iphdr *iph = (const struct iphdr *) skb->data;
803 __be32 daddr = iph->daddr;
804 __be32 saddr = iph->saddr;
805
806 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
807 " Advised path = %pI4 -> %pI4\n",
808 &old_gw, dev->name, &new_gw,
809 &saddr, &daddr);
810 }
811#endif
812 ;
813}
814
815static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
816{
817 struct rtable *rt;
818 struct flowi4 fl4;
819 const struct iphdr *iph = (const struct iphdr *) skb->data;
820 struct net *net = dev_net(skb->dev);
821 int oif = skb->dev->ifindex;
822 u8 tos = RT_TOS(iph->tos);
823 u8 prot = iph->protocol;
824 u32 mark = skb->mark;
825
826 rt = (struct rtable *) dst;
827
828 __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
829 __ip_do_redirect(rt, skb, &fl4, true);
830}
831
832static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
833{
834 struct rtable *rt = (struct rtable *)dst;
835 struct dst_entry *ret = dst;
836
837 if (rt) {
838 if (dst->obsolete > 0) {
839 ip_rt_put(rt);
840 ret = NULL;
841 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
842 rt->dst.expires) {
843 ip_rt_put(rt);
844 ret = NULL;
845 }
846 }
847 return ret;
848}
849
850/*
851 * Algorithm:
852 * 1. The first ip_rt_redirect_number redirects are sent
853 * with exponential backoff, then we stop sending them at all,
854 * assuming that the host ignores our redirects.
855 * 2. If we did not see packets requiring redirects
856 * during ip_rt_redirect_silence, we assume that the host
 857 * forgot the redirected route and start sending redirects again.
858 *
859 * This algorithm is much cheaper and more intelligent than dumb load limiting
860 * in icmp.c.
861 *
862 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
863 * and "frag. need" (breaks PMTU discovery) in icmp.c.
864 */
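/* A rough worked example with the defaults above: successive redirects to
 * a peer are spaced at least ip_rt_redirect_load << n_redirects jiffies
 * apart (HZ/50 for the first, doubling each time); after
 * ip_rt_redirect_number (9) of them we go silent, and the state resets
 * once ip_rt_redirect_silence ((HZ/50) << 10) jiffies pass without the
 * peer triggering further redirects.
 */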
865
866void ip_rt_send_redirect(struct sk_buff *skb)
867{
868 struct rtable *rt = skb_rtable(skb);
869 struct in_device *in_dev;
870 struct inet_peer *peer;
871 struct net *net;
872 int log_martians;
873 int vif;
874
875 rcu_read_lock();
876 in_dev = __in_dev_get_rcu(rt->dst.dev);
877 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
878 rcu_read_unlock();
879 return;
880 }
881 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
882 vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
883 rcu_read_unlock();
884
885 net = dev_net(rt->dst.dev);
886 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
887 if (!peer) {
888 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
889 rt_nexthop(rt, ip_hdr(skb)->daddr));
890 return;
891 }
892
893 /* No redirected packets during ip_rt_redirect_silence;
894 * reset the algorithm.
895 */
896 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
897 peer->rate_tokens = 0;
898 peer->n_redirects = 0;
899 }
900
 901	/* Too many ignored redirects; do not send anything;
 902	 * just set peer->rate_last to the time of the last seen redirected packet.
903 */
904 if (peer->n_redirects >= ip_rt_redirect_number) {
905 peer->rate_last = jiffies;
906 goto out_put_peer;
907 }
908
909 /* Check for load limit; set rate_last to the latest sent
910 * redirect.
911 */
912 if (peer->rate_tokens == 0 ||
913 time_after(jiffies,
914 (peer->rate_last +
915 (ip_rt_redirect_load << peer->n_redirects)))) {
916 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
917
918 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
919 peer->rate_last = jiffies;
920 ++peer->n_redirects;
921#ifdef CONFIG_IP_ROUTE_VERBOSE
922 if (log_martians &&
923 peer->n_redirects == ip_rt_redirect_number)
924 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
925 &ip_hdr(skb)->saddr, inet_iif(skb),
926 &ip_hdr(skb)->daddr, &gw);
927#endif
928 }
929out_put_peer:
930 inet_putpeer(peer);
931}
932
933static int ip_error(struct sk_buff *skb)
934{
935 struct rtable *rt = skb_rtable(skb);
936 struct net_device *dev = skb->dev;
937 struct in_device *in_dev;
938 struct inet_peer *peer;
939 unsigned long now;
940 struct net *net;
941 bool send;
942 int code;
943
944 if (netif_is_l3_master(skb->dev)) {
945 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
946 if (!dev)
947 goto out;
948 }
949
950 in_dev = __in_dev_get_rcu(dev);
951
952 /* IP on this device is disabled. */
953 if (!in_dev)
954 goto out;
955
956 net = dev_net(rt->dst.dev);
957 if (!IN_DEV_FORWARD(in_dev)) {
958 switch (rt->dst.error) {
959 case EHOSTUNREACH:
960 __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
961 break;
962
963 case ENETUNREACH:
964 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
965 break;
966 }
967 goto out;
968 }
969
970 switch (rt->dst.error) {
971 case EINVAL:
972 default:
973 goto out;
974 case EHOSTUNREACH:
975 code = ICMP_HOST_UNREACH;
976 break;
977 case ENETUNREACH:
978 code = ICMP_NET_UNREACH;
979 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
980 break;
981 case EACCES:
982 code = ICMP_PKT_FILTERED;
983 break;
984 }
985
986 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
987 l3mdev_master_ifindex(skb->dev), 1);
988
989 send = true;
990 if (peer) {
991 now = jiffies;
992 peer->rate_tokens += now - peer->rate_last;
993 if (peer->rate_tokens > ip_rt_error_burst)
994 peer->rate_tokens = ip_rt_error_burst;
995 peer->rate_last = now;
996 if (peer->rate_tokens >= ip_rt_error_cost)
997 peer->rate_tokens -= ip_rt_error_cost;
998 else
999 send = false;
1000 inet_putpeer(peer);
1001 }
1002 if (send)
1003 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1004
1005out: kfree_skb(skb);
1006 return 0;
1007}
1008
1009static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1010{
1011 struct dst_entry *dst = &rt->dst;
1012 u32 old_mtu = ipv4_mtu(dst);
1013 struct fib_result res;
1014 bool lock = false;
1015
1016 if (ip_mtu_locked(dst))
1017 return;
1018
1019 if (old_mtu < mtu)
1020 return;
1021
1022 if (mtu < ip_rt_min_pmtu) {
1023 lock = true;
1024 mtu = min(old_mtu, ip_rt_min_pmtu);
1025 }
1026
1027 if (rt->rt_pmtu == mtu && !lock &&
1028 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1029 return;
1030
1031 rcu_read_lock();
1032 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1033 struct fib_nh *nh = &FIB_RES_NH(res);
1034
1035 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
1036 jiffies + ip_rt_mtu_expires);
1037 }
1038 rcu_read_unlock();
1039}
1040
1041static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1042 struct sk_buff *skb, u32 mtu,
1043 bool confirm_neigh)
1044{
1045 struct rtable *rt = (struct rtable *) dst;
1046 struct flowi4 fl4;
1047
1048 ip_rt_build_flow_key(&fl4, sk, skb);
1049 __ip_rt_update_pmtu(rt, &fl4, mtu);
1050}
1051
1052void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1053 int oif, u32 mark, u8 protocol, int flow_flags)
1054{
1055 const struct iphdr *iph = (const struct iphdr *) skb->data;
1056 struct flowi4 fl4;
1057 struct rtable *rt;
1058
1059 if (!mark)
1060 mark = IP4_REPLY_MARK(net, skb->mark);
1061
1062 __build_flow_key(net, &fl4, NULL, iph, oif,
1063 RT_TOS(iph->tos), protocol, mark, flow_flags);
1064 rt = __ip_route_output_key(net, &fl4);
1065 if (!IS_ERR(rt)) {
1066 __ip_rt_update_pmtu(rt, &fl4, mtu);
1067 ip_rt_put(rt);
1068 }
1069}
1070EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1071
1072static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1073{
1074 const struct iphdr *iph = (const struct iphdr *) skb->data;
1075 struct flowi4 fl4;
1076 struct rtable *rt;
1077
1078 __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1079
1080 if (!fl4.flowi4_mark)
1081 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1082
1083 rt = __ip_route_output_key(sock_net(sk), &fl4);
1084 if (!IS_ERR(rt)) {
1085 __ip_rt_update_pmtu(rt, &fl4, mtu);
1086 ip_rt_put(rt);
1087 }
1088}
1089
1090void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1091{
1092 const struct iphdr *iph = (const struct iphdr *) skb->data;
1093 struct flowi4 fl4;
1094 struct rtable *rt;
1095 struct dst_entry *odst = NULL;
1096 bool new = false;
1097 struct net *net = sock_net(sk);
1098
1099 bh_lock_sock(sk);
1100
1101 if (!ip_sk_accept_pmtu(sk))
1102 goto out;
1103
1104 odst = sk_dst_get(sk);
1105
1106 if (sock_owned_by_user(sk) || !odst) {
1107 __ipv4_sk_update_pmtu(skb, sk, mtu);
1108 goto out;
1109 }
1110
1111 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1112
1113 rt = (struct rtable *)odst;
1114 if (odst->obsolete && !odst->ops->check(odst, 0)) {
1115 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1116 if (IS_ERR(rt))
1117 goto out;
1118
1119 new = true;
1120 }
1121
1122 __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
1123
1124 if (!dst_check(&rt->dst, 0)) {
1125 if (new)
1126 dst_release(&rt->dst);
1127
1128 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1129 if (IS_ERR(rt))
1130 goto out;
1131
1132 new = true;
1133 }
1134
1135 if (new)
1136 sk_dst_set(sk, &rt->dst);
1137
1138out:
1139 bh_unlock_sock(sk);
1140 dst_release(odst);
1141}
1142EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1143
1144void ipv4_redirect(struct sk_buff *skb, struct net *net,
1145 int oif, u32 mark, u8 protocol, int flow_flags)
1146{
1147 const struct iphdr *iph = (const struct iphdr *) skb->data;
1148 struct flowi4 fl4;
1149 struct rtable *rt;
1150
1151 __build_flow_key(net, &fl4, NULL, iph, oif,
1152 RT_TOS(iph->tos), protocol, mark, flow_flags);
1153 rt = __ip_route_output_key(net, &fl4);
1154 if (!IS_ERR(rt)) {
1155 __ip_do_redirect(rt, skb, &fl4, false);
1156 ip_rt_put(rt);
1157 }
1158}
1159EXPORT_SYMBOL_GPL(ipv4_redirect);
1160
1161void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1162{
1163 const struct iphdr *iph = (const struct iphdr *) skb->data;
1164 struct flowi4 fl4;
1165 struct rtable *rt;
1166 struct net *net = sock_net(sk);
1167
1168 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1169 rt = __ip_route_output_key(net, &fl4);
1170 if (!IS_ERR(rt)) {
1171 __ip_do_redirect(rt, skb, &fl4, false);
1172 ip_rt_put(rt);
1173 }
1174}
1175EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1176
1177static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1178{
1179 struct rtable *rt = (struct rtable *) dst;
1180
1181 /* All IPV4 dsts are created with ->obsolete set to the value
1182 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1183 * into this function always.
1184 *
1185 * When a PMTU/redirect information update invalidates a route,
1186 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1187 * DST_OBSOLETE_DEAD by dst_free().
1188 */
1189 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1190 return NULL;
1191 return dst;
1192}
1193
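/* Send an ICMP "host unreachable" for a packet whose route has failed,
 * recompiling the IP options because IPCB(skb) may no longer be valid.
 */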
1194static void ipv4_send_dest_unreach(struct sk_buff *skb)
1195{
1196 struct ip_options opt;
1197 int res;
1198
1199 /* Recompile ip options since IPCB may not be valid anymore.
1200 * Also check we have a reasonable ipv4 header.
1201 */
1202 if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1203 ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1204 return;
1205
1206 memset(&opt, 0, sizeof(opt));
1207 if (ip_hdr(skb)->ihl > 5) {
1208 if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1209 return;
1210 opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1211
1212 rcu_read_lock();
1213 res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1214 rcu_read_unlock();
1215
1216 if (res)
1217 return;
1218 }
1219 __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1220}
1221
1222static void ipv4_link_failure(struct sk_buff *skb)
1223{
1224 struct rtable *rt;
1225
1226 ipv4_send_dest_unreach(skb);
1227
1228 rt = skb_rtable(skb);
1229 if (rt)
1230 dst_set_expires(&rt->dst, 0);
1231}
1232
1233static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1234{
1235 pr_debug("%s: %pI4 -> %pI4, %s\n",
1236 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1237 skb->dev ? skb->dev->name : "?");
1238 kfree_skb(skb);
1239 WARN_ON(1);
1240 return 0;
1241}
1242
1243/*
 1244 We do not cache the source address of the outgoing interface,
 1245 because it is used only by the IP RR, TS and SRR options,
 1246 so it is out of the fast path.
 1247
 1248 BTW remember: "addr" is allowed to be unaligned
 1249 in IP options!
1250 */
1251
1252void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1253{
1254 __be32 src;
1255
1256 if (rt_is_output_route(rt))
1257 src = ip_hdr(skb)->saddr;
1258 else {
1259 struct fib_result res;
1260 struct flowi4 fl4;
1261 struct iphdr *iph;
1262
1263 iph = ip_hdr(skb);
1264
1265 memset(&fl4, 0, sizeof(fl4));
1266 fl4.daddr = iph->daddr;
1267 fl4.saddr = iph->saddr;
1268 fl4.flowi4_tos = RT_TOS(iph->tos);
1269 fl4.flowi4_oif = rt->dst.dev->ifindex;
1270 fl4.flowi4_iif = skb->dev->ifindex;
1271 fl4.flowi4_mark = skb->mark;
1272
1273 rcu_read_lock();
1274 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1275 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1276 else
1277 src = inet_select_addr(rt->dst.dev,
1278 rt_nexthop(rt, iph->daddr),
1279 RT_SCOPE_UNIVERSE);
1280 rcu_read_unlock();
1281 }
1282 memcpy(addr, &src, 4);
1283}
1284
1285#ifdef CONFIG_IP_ROUTE_CLASSID
1286static void set_class_tag(struct rtable *rt, u32 tag)
1287{
1288 if (!(rt->dst.tclassid & 0xFFFF))
1289 rt->dst.tclassid |= tag & 0xFFFF;
1290 if (!(rt->dst.tclassid & 0xFFFF0000))
1291 rt->dst.tclassid |= tag & 0xFFFF0000;
1292}
1293#endif
1294
1295static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1296{
1297 unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1298 unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1299 ip_rt_min_advmss);
1300
1301 return min(advmss, IPV4_MAX_PMTU - header_size);
1302}
1303
1304static unsigned int ipv4_mtu(const struct dst_entry *dst)
1305{
1306 const struct rtable *rt = (const struct rtable *) dst;
1307 unsigned int mtu = rt->rt_pmtu;
1308
1309 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1310 mtu = dst_metric_raw(dst, RTAX_MTU);
1311
1312 if (mtu)
1313 return mtu;
1314
1315 mtu = READ_ONCE(dst->dev->mtu);
1316
1317 if (unlikely(ip_mtu_locked(dst))) {
1318 if (rt->rt_uses_gateway && mtu > 576)
1319 mtu = 576;
1320 }
1321
1322 mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
1323
1324 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1325}
1326
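/* Unlink and free the exception entry for @daddr (used once it has
 * expired), flushing any cached routes bound to it.
 */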
1327static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1328{
1329 struct fnhe_hash_bucket *hash;
1330 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1331 u32 hval = fnhe_hashfun(daddr);
1332
1333 spin_lock_bh(&fnhe_lock);
1334
1335 hash = rcu_dereference_protected(nh->nh_exceptions,
1336 lockdep_is_held(&fnhe_lock));
1337 hash += hval;
1338
1339 fnhe_p = &hash->chain;
1340 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1341 while (fnhe) {
1342 if (fnhe->fnhe_daddr == daddr) {
1343 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1344 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1345 /* set fnhe_daddr to 0 to ensure it won't bind with
1346 * new dsts in rt_bind_exception().
1347 */
1348 fnhe->fnhe_daddr = 0;
1349 fnhe_flush_routes(fnhe);
1350 kfree_rcu(fnhe, rcu);
1351 break;
1352 }
1353 fnhe_p = &fnhe->fnhe_next;
1354 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1355 lockdep_is_held(&fnhe_lock));
1356 }
1357
1358 spin_unlock_bh(&fnhe_lock);
1359}
1360
1361static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1362{
1363 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
1364 struct fib_nh_exception *fnhe;
1365 u32 hval;
1366
1367 if (!hash)
1368 return NULL;
1369
1370 hval = fnhe_hashfun(daddr);
1371
1372 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1373 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1374 if (fnhe->fnhe_daddr == daddr) {
1375 if (fnhe->fnhe_expires &&
1376 time_after(jiffies, fnhe->fnhe_expires)) {
1377 ip_del_fnhe(nh, daddr);
1378 break;
1379 }
1380 return fnhe;
1381 }
1382 }
1383 return NULL;
1384}
1385
1386/* MTU selection:
1387 * 1. mtu on route is locked - use it
1388 * 2. mtu from nexthop exception
1389 * 3. mtu from egress device
1390 */
1391
1392u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1393{
1394 struct fib_info *fi = res->fi;
1395 struct fib_nh *nh = &fi->fib_nh[res->nh_sel];
1396 struct net_device *dev = nh->nh_dev;
1397 u32 mtu = 0;
1398
1399 if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
1400 fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1401 mtu = fi->fib_mtu;
1402
1403 if (likely(!mtu)) {
1404 struct fib_nh_exception *fnhe;
1405
1406 fnhe = find_exception(nh, daddr);
1407 if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1408 mtu = fnhe->fnhe_pmtu;
1409 }
1410
1411 if (likely(!mtu))
1412 mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1413
1414 return mtu - lwtunnel_headroom(nh->nh_lwtstate, mtu);
1415}
1416
1417static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1418 __be32 daddr, const bool do_cache)
1419{
1420 bool ret = false;
1421
1422 spin_lock_bh(&fnhe_lock);
1423
1424 if (daddr == fnhe->fnhe_daddr) {
1425 struct rtable __rcu **porig;
1426 struct rtable *orig;
1427 int genid = fnhe_genid(dev_net(rt->dst.dev));
1428
1429 if (rt_is_input_route(rt))
1430 porig = &fnhe->fnhe_rth_input;
1431 else
1432 porig = &fnhe->fnhe_rth_output;
1433 orig = rcu_dereference(*porig);
1434
1435 if (fnhe->fnhe_genid != genid) {
1436 fnhe->fnhe_genid = genid;
1437 fnhe->fnhe_gw = 0;
1438 fnhe->fnhe_pmtu = 0;
1439 fnhe->fnhe_expires = 0;
1440 fnhe->fnhe_mtu_locked = false;
1441 fnhe_flush_routes(fnhe);
1442 orig = NULL;
1443 }
1444 fill_route_from_fnhe(rt, fnhe);
1445 if (!rt->rt_gateway)
1446 rt->rt_gateway = daddr;
1447
1448 if (do_cache) {
1449 dst_hold(&rt->dst);
1450 rcu_assign_pointer(*porig, rt);
1451 if (orig) {
1452 dst_dev_put(&orig->dst);
1453 dst_release(&orig->dst);
1454 }
1455 ret = true;
1456 }
1457
1458 fnhe->fnhe_stamp = jiffies;
1459 }
1460 spin_unlock_bh(&fnhe_lock);
1461
1462 return ret;
1463}
1464
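/* Try to install @rt as the cached route for this nexthop (the shared
 * input slot or this CPU's output slot). Returns false if another CPU won
 * the cmpxchg race and the route stays uncached.
 */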
1465static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1466{
1467 struct rtable *orig, *prev, **p;
1468 bool ret = true;
1469
1470 if (rt_is_input_route(rt)) {
1471 p = (struct rtable **)&nh->nh_rth_input;
1472 } else {
1473 p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
1474 }
1475 orig = *p;
1476
1477 /* hold dst before doing cmpxchg() to avoid race condition
1478 * on this dst
1479 */
1480 dst_hold(&rt->dst);
1481 prev = cmpxchg(p, orig, rt);
1482 if (prev == orig) {
1483 if (orig) {
1484 rt_add_uncached_list(orig);
1485 dst_release(&orig->dst);
1486 }
1487 } else {
1488 dst_release(&rt->dst);
1489 ret = false;
1490 }
1491
1492 return ret;
1493}
1494
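/* Routes that could not be cached on a nexthop are kept on per-CPU
 * "uncached" lists so that rt_flush_dev() can still detach them from a
 * device that is going away.
 */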
1495struct uncached_list {
1496 spinlock_t lock;
1497 struct list_head head;
1498};
1499
1500static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1501
1502void rt_add_uncached_list(struct rtable *rt)
1503{
1504 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1505
1506 rt->rt_uncached_list = ul;
1507
1508 spin_lock_bh(&ul->lock);
1509 list_add_tail(&rt->rt_uncached, &ul->head);
1510 spin_unlock_bh(&ul->lock);
1511}
1512
1513void rt_del_uncached_list(struct rtable *rt)
1514{
1515 if (!list_empty(&rt->rt_uncached)) {
1516 struct uncached_list *ul = rt->rt_uncached_list;
1517
1518 spin_lock_bh(&ul->lock);
1519 list_del(&rt->rt_uncached);
1520 spin_unlock_bh(&ul->lock);
1521 }
1522}
1523
1524static void ipv4_dst_destroy(struct dst_entry *dst)
1525{
1526 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1527 struct rtable *rt = (struct rtable *)dst;
1528
1529 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1530 kfree(p);
1531
1532 rt_del_uncached_list(rt);
1533}
1534
1535void rt_flush_dev(struct net_device *dev)
1536{
1537 struct net *net = dev_net(dev);
1538 struct rtable *rt;
1539 int cpu;
1540
1541 for_each_possible_cpu(cpu) {
1542 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1543
1544 spin_lock_bh(&ul->lock);
1545 list_for_each_entry(rt, &ul->head, rt_uncached) {
1546 if (rt->dst.dev != dev)
1547 continue;
1548 rt->dst.dev = net->loopback_dev;
1549 dev_hold(rt->dst.dev);
1550 dev_put(dev);
1551 }
1552 spin_unlock_bh(&ul->lock);
1553 }
1554}
1555
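/* A cached route may be reused only while it is still marked
 * DST_OBSOLETE_FORCE_CHK and its generation id is current.
 */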
1556static bool rt_cache_valid(const struct rtable *rt)
1557{
1558 return rt &&
1559 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1560 !rt_is_expired(rt);
1561}
1562
1563static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1564 const struct fib_result *res,
1565 struct fib_nh_exception *fnhe,
1566 struct fib_info *fi, u16 type, u32 itag,
1567 const bool do_cache)
1568{
1569 bool cached = false;
1570
1571 if (fi) {
1572 struct fib_nh *nh = &FIB_RES_NH(*res);
1573
1574 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
1575 rt->rt_gateway = nh->nh_gw;
1576 rt->rt_uses_gateway = 1;
1577 }
1578 dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
1579 if (fi->fib_metrics != &dst_default_metrics) {
1580 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
1581 refcount_inc(&fi->fib_metrics->refcnt);
1582 }
1583#ifdef CONFIG_IP_ROUTE_CLASSID
1584 rt->dst.tclassid = nh->nh_tclassid;
1585#endif
1586 rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
1587 if (unlikely(fnhe))
1588 cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1589 else if (do_cache)
1590 cached = rt_cache_route(nh, rt);
1591 if (unlikely(!cached)) {
1592 /* Routes we intend to cache in nexthop exception or
1593 * FIB nexthop have the DST_NOCACHE bit clear.
1594 * However, if we are unsuccessful at storing this
1595 * route into the cache we really need to set it.
1596 */
1597 if (!rt->rt_gateway)
1598 rt->rt_gateway = daddr;
1599 rt_add_uncached_list(rt);
1600 }
1601 } else
1602 rt_add_uncached_list(rt);
1603
1604#ifdef CONFIG_IP_ROUTE_CLASSID
1605#ifdef CONFIG_IP_MULTIPLE_TABLES
1606 set_class_tag(rt, res->tclassid);
1607#endif
1608 set_class_tag(rt, itag);
1609#endif
1610}
1611
1612struct rtable *rt_dst_alloc(struct net_device *dev,
1613 unsigned int flags, u16 type,
1614 bool nopolicy, bool noxfrm, bool will_cache)
1615{
1616 struct rtable *rt;
1617
1618 rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1619 (will_cache ? 0 : DST_HOST) |
1620 (nopolicy ? DST_NOPOLICY : 0) |
1621 (noxfrm ? DST_NOXFRM : 0));
1622
1623 if (rt) {
1624 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1625 rt->rt_flags = flags;
1626 rt->rt_type = type;
1627 rt->rt_is_input = 0;
1628 rt->rt_iif = 0;
1629 rt->rt_pmtu = 0;
1630 rt->rt_mtu_locked = 0;
1631 rt->rt_gateway = 0;
1632 rt->rt_uses_gateway = 0;
1633 INIT_LIST_HEAD(&rt->rt_uncached);
1634
1635 rt->dst.output = ip_output;
1636 if (flags & RTCF_LOCAL)
1637 rt->dst.input = ip_local_deliver;
1638 }
1639
1640 return rt;
1641}
1642EXPORT_SYMBOL(rt_dst_alloc);
1643
1644/* called in rcu_read_lock() section */
1645int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1646 u8 tos, struct net_device *dev,
1647 struct in_device *in_dev, u32 *itag)
1648{
1649 int err;
1650
1651 /* Primary sanity checks. */
1652 if (!in_dev)
1653 return -EINVAL;
1654
1655 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1656 skb->protocol != htons(ETH_P_IP))
1657 return -EINVAL;
1658
1659 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1660 return -EINVAL;
1661
1662 if (ipv4_is_zeronet(saddr)) {
1663 if (!ipv4_is_local_multicast(daddr))
1664 return -EINVAL;
1665 } else {
1666 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1667 in_dev, itag);
1668 if (err < 0)
1669 return err;
1670 }
1671 return 0;
1672}
1673
1674/* called in rcu_read_lock() section */
1675static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1676 u8 tos, struct net_device *dev, int our)
1677{
1678 struct in_device *in_dev = __in_dev_get_rcu(dev);
1679 unsigned int flags = RTCF_MULTICAST;
1680 struct rtable *rth;
1681 u32 itag = 0;
1682 int err;
1683
1684 err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1685 if (err)
1686 return err;
1687
1688 if (our)
1689 flags |= RTCF_LOCAL;
1690
1691 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1692 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1693 if (!rth)
1694 return -ENOBUFS;
1695
1696#ifdef CONFIG_IP_ROUTE_CLASSID
1697 rth->dst.tclassid = itag;
1698#endif
1699 rth->dst.output = ip_rt_bug;
1700 rth->rt_is_input= 1;
1701
1702#ifdef CONFIG_IP_MROUTE
1703 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1704 rth->dst.input = ip_mr_input;
1705#endif
1706 RT_CACHE_STAT_INC(in_slow_mc);
1707
1708 skb_dst_set(skb, &rth->dst);
1709 return 0;
1710}
1711
1712
1713static void ip_handle_martian_source(struct net_device *dev,
1714 struct in_device *in_dev,
1715 struct sk_buff *skb,
1716 __be32 daddr,
1717 __be32 saddr)
1718{
1719 RT_CACHE_STAT_INC(in_martian_src);
1720#ifdef CONFIG_IP_ROUTE_VERBOSE
1721 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1722 /*
 1723		 * RFC 1812 recommendation: if the source is martian,
 1724		 * the only hint we can log is the MAC header.
1725 */
1726 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1727 &daddr, &saddr, dev->name);
1728 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1729 print_hex_dump(KERN_WARNING, "ll header: ",
1730 DUMP_PREFIX_OFFSET, 16, 1,
1731 skb_mac_header(skb),
1732 dev->hard_header_len, true);
1733 }
1734 }
1735#endif
1736}
1737
1738/* called in rcu_read_lock() section */
1739static int __mkroute_input(struct sk_buff *skb,
1740 const struct fib_result *res,
1741 struct in_device *in_dev,
1742 __be32 daddr, __be32 saddr, u32 tos)
1743{
1744 struct fib_nh_exception *fnhe;
1745 struct rtable *rth;
1746 int err;
1747 struct in_device *out_dev;
1748 bool do_cache;
1749 u32 itag = 0;
1750
1751 /* get a working reference to the output device */
1752 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1753 if (!out_dev) {
1754 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1755 return -EINVAL;
1756 }
1757
1758 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1759 in_dev->dev, in_dev, &itag);
1760 if (err < 0) {
1761 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1762 saddr);
1763
1764 goto cleanup;
1765 }
1766
1767 do_cache = res->fi && !itag;
1768 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1769 skb->protocol == htons(ETH_P_IP) &&
1770 (IN_DEV_SHARED_MEDIA(out_dev) ||
1771 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1772 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1773
1774 if (skb->protocol != htons(ETH_P_IP)) {
 1775	/* Not IP (i.e. ARP). Do not create a route if it is
 1776	 * invalid for proxy arp. DNAT routes are always valid.
 1777	 *
 1778	 * The proxy arp feature has been extended to allow ARP
 1779	 * replies back out the same interface, to support
 1780	 * Private VLAN switch technologies. See arp.c.
1781 */
1782 if (out_dev == in_dev &&
1783 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1784 err = -EINVAL;
1785 goto cleanup;
1786 }
1787 }
1788
1789 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1790 if (do_cache) {
1791 if (fnhe)
1792 rth = rcu_dereference(fnhe->fnhe_rth_input);
1793 else
1794 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1795 if (rt_cache_valid(rth)) {
1796 skb_dst_set_noref(skb, &rth->dst);
1797 goto out;
1798 }
1799 }
1800
1801 rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1802 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1803 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1804 if (!rth) {
1805 err = -ENOBUFS;
1806 goto cleanup;
1807 }
1808
1809 rth->rt_is_input = 1;
1810 RT_CACHE_STAT_INC(in_slow_tot);
1811
1812 rth->dst.input = ip_forward;
1813
1814 rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1815 do_cache);
1816 lwtunnel_set_redirect(&rth->dst);
1817 skb_dst_set(skb, &rth->dst);
1818out:
1819 err = 0;
1820 cleanup:
1821 return err;
1822}
1823
1824#ifdef CONFIG_IP_ROUTE_MULTIPATH
1825/* To make ICMP packets follow the right flow, the multipath hash is
1826 * calculated from the inner IP addresses.
1827 */
1828static void ip_multipath_l3_keys(const struct sk_buff *skb,
1829 struct flow_keys *hash_keys)
1830{
1831 const struct iphdr *outer_iph = ip_hdr(skb);
1832 const struct iphdr *key_iph = outer_iph;
1833 const struct iphdr *inner_iph;
1834 const struct icmphdr *icmph;
1835 struct iphdr _inner_iph;
1836 struct icmphdr _icmph;
1837
1838 if (likely(outer_iph->protocol != IPPROTO_ICMP))
1839 goto out;
1840
1841 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1842 goto out;
1843
1844 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1845 &_icmph);
1846 if (!icmph)
1847 goto out;
1848
1849 if (icmph->type != ICMP_DEST_UNREACH &&
1850 icmph->type != ICMP_REDIRECT &&
1851 icmph->type != ICMP_TIME_EXCEEDED &&
1852 icmph->type != ICMP_PARAMETERPROB)
1853 goto out;
1854
1855 inner_iph = skb_header_pointer(skb,
1856 outer_iph->ihl * 4 + sizeof(_icmph),
1857 sizeof(_inner_iph), &_inner_iph);
1858 if (!inner_iph)
1859 goto out;
1860
1861 key_iph = inner_iph;
1862out:
1863 hash_keys->addrs.v4addrs.src = key_iph->saddr;
1864 hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1865}
1866
1867/* if skb is set it will be used and fl4 can be NULL */
1868int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
1869 const struct sk_buff *skb, struct flow_keys *flkeys)
1870{
1871 struct flow_keys hash_keys;
1872 u32 mhash;
1873
1874 switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1875 case 0:
1876 memset(&hash_keys, 0, sizeof(hash_keys));
1877 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1878 if (skb) {
1879 ip_multipath_l3_keys(skb, &hash_keys);
1880 } else {
1881 hash_keys.addrs.v4addrs.src = fl4->saddr;
1882 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1883 }
1884 break;
1885 case 1:
1886 /* skb is currently provided only when forwarding */
1887 if (skb) {
1888 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1889 struct flow_keys keys;
1890
1891 /* short-circuit if we already have L4 hash present */
1892 if (skb->l4_hash)
1893 return skb_get_hash_raw(skb) >> 1;
1894
1895 memset(&hash_keys, 0, sizeof(hash_keys));
1896
1897 if (!flkeys) {
1898 skb_flow_dissect_flow_keys(skb, &keys, flag);
1899 flkeys = &keys;
1900 }
1901
1902 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1903 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
1904 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
1905 hash_keys.ports.src = flkeys->ports.src;
1906 hash_keys.ports.dst = flkeys->ports.dst;
1907 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
1908 } else {
1909 memset(&hash_keys, 0, sizeof(hash_keys));
1910 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1911 hash_keys.addrs.v4addrs.src = fl4->saddr;
1912 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1913 hash_keys.ports.src = fl4->fl4_sport;
1914 hash_keys.ports.dst = fl4->fl4_dport;
1915 hash_keys.basic.ip_proto = fl4->flowi4_proto;
1916 }
1917 break;
1918 }
1919 mhash = flow_hash_from_keys(&hash_keys);
1920
1921 return mhash >> 1;
1922}
1923#endif /* CONFIG_IP_ROUTE_MULTIPATH */
1924
1925static int ip_mkroute_input(struct sk_buff *skb,
1926 struct fib_result *res,
1927 struct in_device *in_dev,
1928 __be32 daddr, __be32 saddr, u32 tos,
1929 struct flow_keys *hkeys)
1930{
1931#ifdef CONFIG_IP_ROUTE_MULTIPATH
1932 if (res->fi && res->fi->fib_nhs > 1) {
1933 int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
1934
1935 fib_select_multipath(res, h);
1936 }
1937#endif
1938
1939 /* create a routing cache entry */
1940 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1941}
1942
1943/*
 1944 * NOTE. We drop all packets that have local source
 1945 * addresses, because every properly looped-back packet
 1946 * must already have the correct destination attached by the output routine.
 1947 *
 1948 * This approach solves two big problems:
 1949 * 1. Non-simplex devices are handled properly.
 1950 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 1951 * Called with rcu_read_lock().
1952 */
1953
1954static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1955 u8 tos, struct net_device *dev,
1956 struct fib_result *res)
1957{
1958 struct in_device *in_dev = __in_dev_get_rcu(dev);
1959 struct flow_keys *flkeys = NULL, _flkeys;
1960 struct net *net = dev_net(dev);
1961 struct ip_tunnel_info *tun_info;
1962 int err = -EINVAL;
1963 unsigned int flags = 0;
1964 u32 itag = 0;
1965 struct rtable *rth;
1966 struct flowi4 fl4;
1967 bool do_cache = true;
1968
1969 /* IP on this device is disabled. */
1970
1971 if (!in_dev)
1972 goto out;
1973
 1974	/* Check for the most weird martians, which cannot be detected
1975 by fib_lookup.
1976 */
1977
1978 tun_info = skb_tunnel_info(skb);
1979 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1980 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
1981 else
1982 fl4.flowi4_tun_key.tun_id = 0;
1983 skb_dst_drop(skb);
1984
1985 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1986 goto martian_source;
1987
1988 res->fi = NULL;
1989 res->table = NULL;
1990 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1991 goto brd_input;
1992
1993 /* Accept zero addresses only to limited broadcast;
 1994	 * I do not even know whether to fix it or not. Waiting for complaints :-)
1995 */
1996 if (ipv4_is_zeronet(saddr))
1997 goto martian_source;
1998
1999 if (ipv4_is_zeronet(daddr))
2000 goto martian_destination;
2001
 2002	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
 2003	 * and calls it at most once when daddr and/or saddr are loopback addresses
2004 */
2005 if (ipv4_is_loopback(daddr)) {
2006 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2007 goto martian_destination;
2008 } else if (ipv4_is_loopback(saddr)) {
2009 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2010 goto martian_source;
2011 }
2012
2013 /*
 2014	 *	Now we are ready to route the packet.
2015 */
2016 fl4.flowi4_oif = 0;
2017 fl4.flowi4_iif = dev->ifindex;
2018 fl4.flowi4_mark = skb->mark;
2019 fl4.flowi4_tos = tos;
2020 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2021 fl4.flowi4_flags = 0;
2022 fl4.daddr = daddr;
2023 fl4.saddr = saddr;
2024 fl4.flowi4_uid = sock_net_uid(net, NULL);
2025
2026 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2027 flkeys = &_flkeys;
2028 } else {
2029 fl4.flowi4_proto = 0;
2030 fl4.fl4_sport = 0;
2031 fl4.fl4_dport = 0;
2032 }
2033
2034 err = fib_lookup(net, &fl4, res, 0);
2035 if (err != 0) {
2036 if (!IN_DEV_FORWARD(in_dev))
2037 err = -EHOSTUNREACH;
2038 goto no_route;
2039 }
2040
2041 if (res->type == RTN_BROADCAST) {
2042 if (IN_DEV_BFORWARD(in_dev))
2043 goto make_route;
2044		/* do not cache if bc_forwarding is enabled */
2045 if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2046 do_cache = false;
2047 goto brd_input;
2048 }
2049
2050 if (res->type == RTN_LOCAL) {
2051 err = fib_validate_source(skb, saddr, daddr, tos,
2052 0, dev, in_dev, &itag);
2053 if (err < 0)
2054 goto martian_source;
2055 goto local_input;
2056 }
2057
2058 if (!IN_DEV_FORWARD(in_dev)) {
2059 err = -EHOSTUNREACH;
2060 goto no_route;
2061 }
2062 if (res->type != RTN_UNICAST)
2063 goto martian_destination;
2064
2065make_route:
2066 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2067out: return err;
2068
2069brd_input:
2070 if (skb->protocol != htons(ETH_P_IP))
2071 goto e_inval;
2072
2073 if (!ipv4_is_zeronet(saddr)) {
2074 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2075 in_dev, &itag);
2076 if (err < 0)
2077 goto martian_source;
2078 }
2079 flags |= RTCF_BROADCAST;
2080 res->type = RTN_BROADCAST;
2081 RT_CACHE_STAT_INC(in_brd);
2082
2083local_input:
2084 do_cache &= res->fi && !itag;
2085 if (do_cache) {
2086 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
2087 if (rt_cache_valid(rth)) {
2088 skb_dst_set_noref(skb, &rth->dst);
2089 err = 0;
2090 goto out;
2091 }
2092 }
2093
2094 rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
2095 flags | RTCF_LOCAL, res->type,
2096 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
2097 if (!rth)
2098 goto e_nobufs;
2099
2100	rth->dst.output = ip_rt_bug;
2101#ifdef CONFIG_IP_ROUTE_CLASSID
2102 rth->dst.tclassid = itag;
2103#endif
2104 rth->rt_is_input = 1;
2105
2106 RT_CACHE_STAT_INC(in_slow_tot);
2107 if (res->type == RTN_UNREACHABLE) {
2108		rth->dst.input = ip_error;
2109		rth->dst.error = -err;
2110 rth->rt_flags &= ~RTCF_LOCAL;
2111 }
2112
2113 if (do_cache) {
2114 struct fib_nh *nh = &FIB_RES_NH(*res);
2115
2116 rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
2117 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2118 WARN_ON(rth->dst.input == lwtunnel_input);
2119 rth->dst.lwtstate->orig_input = rth->dst.input;
2120 rth->dst.input = lwtunnel_input;
2121 }
2122
2123 if (unlikely(!rt_cache_route(nh, rth)))
2124 rt_add_uncached_list(rth);
2125 }
2126 skb_dst_set(skb, &rth->dst);
2127 err = 0;
2128 goto out;
2129
2130no_route:
2131 RT_CACHE_STAT_INC(in_no_route);
2132 res->type = RTN_UNREACHABLE;
2133 res->fi = NULL;
2134 res->table = NULL;
2135 goto local_input;
2136
2137 /*
2138 * Do not cache martian addresses: they should be logged (RFC1812)
2139 */
2140martian_destination:
2141 RT_CACHE_STAT_INC(in_martian_dst);
2142#ifdef CONFIG_IP_ROUTE_VERBOSE
2143 if (IN_DEV_LOG_MARTIANS(in_dev))
2144 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2145 &daddr, &saddr, dev->name);
2146#endif
2147
2148e_inval:
2149 err = -EINVAL;
2150 goto out;
2151
2152e_nobufs:
2153 err = -ENOBUFS;
2154 goto out;
2155
2156martian_source:
2157 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2158 goto out;
2159}
2160
2161int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2162 u8 tos, struct net_device *dev)
2163{
2164 struct fib_result res;
2165 int err;
2166
2167 tos &= IPTOS_RT_MASK;
2168 rcu_read_lock();
2169 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2170 rcu_read_unlock();
2171
2172 return err;
2173}
2174EXPORT_SYMBOL(ip_route_input_noref);
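
/* A minimal usage sketch (not taken from this file; skb is assumed to be a
 * received IPv4 packet): a caller on the input path typically resolves the
 * route like
 *
 *	err = ip_route_input_noref(skb, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
 *				   ip_hdr(skb)->tos, skb->dev);
 *	if (unlikely(err))
 *		goto drop;
 *
 * after which skb_dst(skb) carries the resolved dst entry.
 */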
2175
2176/* called with rcu_read_lock held */
2177int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2178 u8 tos, struct net_device *dev, struct fib_result *res)
2179{
2180	/* Multicast recognition logic is moved from the route cache to here.
2181	   The problem was that too many Ethernet cards have broken/missing
2182	   hardware multicast filters :-( As a result, a host on a multicast
2183	   network acquires a lot of useless route cache entries, e.g. for
2184	   SDR messages from all over the world. Now we try to get rid of them.
2185	   Really, provided the software IP multicast filter is organized
2186	   reasonably (at least, hashed), this does not cause a slowdown
2187	   compared with route cache reject entries.
2188	   Note that multicast routers are not affected, because
2189	   a route cache entry is created eventually.
2190	 */
2191 if (ipv4_is_multicast(daddr)) {
2192 struct in_device *in_dev = __in_dev_get_rcu(dev);
2193 int our = 0;
2194 int err = -EINVAL;
2195
2196 if (!in_dev)
2197 return err;
2198 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2199 ip_hdr(skb)->protocol);
2200
2201 /* check l3 master if no match yet */
2202 if (!our && netif_is_l3_slave(dev)) {
2203 struct in_device *l3_in_dev;
2204
2205 l3_in_dev = __in_dev_get_rcu(skb->dev);
2206 if (l3_in_dev)
2207 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2208 ip_hdr(skb)->protocol);
2209 }
2210
2211 if (our
2212#ifdef CONFIG_IP_MROUTE
2213 ||
2214 (!ipv4_is_local_multicast(daddr) &&
2215 IN_DEV_MFORWARD(in_dev))
2216#endif
2217 ) {
2218 err = ip_route_input_mc(skb, daddr, saddr,
2219 tos, dev, our);
2220 }
2221 return err;
2222 }
2223
2224 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2225}
2226
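/* Build the rtable for the output route described by @res and @fl4 on
 * @dev_out, reusing a cached per-nexthop or per-exception dst when a valid
 * one exists.
 */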
2227/* called with rcu_read_lock() */
2228static struct rtable *__mkroute_output(const struct fib_result *res,
2229 const struct flowi4 *fl4, int orig_oif,
2230 struct net_device *dev_out,
2231 unsigned int flags)
2232{
2233 struct fib_info *fi = res->fi;
2234 struct fib_nh_exception *fnhe;
2235 struct in_device *in_dev;
2236 u16 type = res->type;
2237 struct rtable *rth;
2238 bool do_cache;
2239
2240 in_dev = __in_dev_get_rcu(dev_out);
2241 if (!in_dev)
2242 return ERR_PTR(-EINVAL);
2243
2244 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2245 if (ipv4_is_loopback(fl4->saddr) &&
2246 !(dev_out->flags & IFF_LOOPBACK) &&
2247 !netif_is_l3_master(dev_out))
2248 return ERR_PTR(-EINVAL);
2249
2250 if (ipv4_is_lbcast(fl4->daddr))
2251 type = RTN_BROADCAST;
2252 else if (ipv4_is_multicast(fl4->daddr))
2253 type = RTN_MULTICAST;
2254 else if (ipv4_is_zeronet(fl4->daddr))
2255 return ERR_PTR(-EINVAL);
2256
2257 if (dev_out->flags & IFF_LOOPBACK)
2258 flags |= RTCF_LOCAL;
2259
2260 do_cache = true;
2261 if (type == RTN_BROADCAST) {
2262 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2263 fi = NULL;
2264 } else if (type == RTN_MULTICAST) {
2265 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2266 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2267 fl4->flowi4_proto))
2268 flags &= ~RTCF_LOCAL;
2269 else
2270 do_cache = false;
2271		/* If a multicast route does not exist, use the
2272		 * default one, but do not use the gateway in this case.
2273		 * Yes, it is a hack.
2274		 */
2275 if (fi && res->prefixlen < 4)
2276 fi = NULL;
2277 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2278 (orig_oif != dev_out->ifindex)) {
2279		/* For local routes that require a particular output interface
2280		 * we do not want to cache the result.  Caching the result
2281		 * causes incorrect behaviour when there are multiple source
2282		 * addresses on the interface: if the intended recipient is
2283		 * waiting on that interface for the packet, it will not be
2284		 * received, because it will be delivered on the loopback
2285		 * interface and the IP_PKTINFO ipi_ifindex will be set to
2286		 * the loopback interface as well.
2287		 */
2288 do_cache = false;
2289 }
2290
2291 fnhe = NULL;
2292 do_cache &= fi != NULL;
2293 if (fi) {
2294 struct rtable __rcu **prth;
2295 struct fib_nh *nh = &FIB_RES_NH(*res);
2296
2297 fnhe = find_exception(nh, fl4->daddr);
2298 if (!do_cache)
2299 goto add;
2300 if (fnhe) {
2301 prth = &fnhe->fnhe_rth_output;
2302 } else {
2303 if (unlikely(fl4->flowi4_flags &
2304 FLOWI_FLAG_KNOWN_NH &&
2305 !(nh->nh_gw &&
2306 nh->nh_scope == RT_SCOPE_LINK))) {
2307 do_cache = false;
2308 goto add;
2309 }
2310 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2311 }
2312 rth = rcu_dereference(*prth);
2313 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2314 return rth;
2315 }
2316
2317add:
2318 rth = rt_dst_alloc(dev_out, flags, type,
2319 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2320 IN_DEV_CONF_GET(in_dev, NOXFRM),
2321 do_cache);
2322 if (!rth)
2323 return ERR_PTR(-ENOBUFS);
2324
2325 rth->rt_iif = orig_oif;
2326
2327 RT_CACHE_STAT_INC(out_slow_tot);
2328
2329 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2330 if (flags & RTCF_LOCAL &&
2331 !(dev_out->flags & IFF_LOOPBACK)) {
2332 rth->dst.output = ip_mc_output;
2333 RT_CACHE_STAT_INC(out_slow_mc);
2334 }
2335#ifdef CONFIG_IP_MROUTE
2336 if (type == RTN_MULTICAST) {
2337 if (IN_DEV_MFORWARD(in_dev) &&
2338 !ipv4_is_local_multicast(fl4->daddr)) {
2339 rth->dst.input = ip_mr_input;
2340 rth->dst.output = ip_mc_output;
2341 }
2342 }
2343#endif
2344 }
2345
2346 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2347 lwtunnel_set_redirect(&rth->dst);
2348
2349 return rth;
2350}
2351
2352/*
2353 * Major route resolver routine.
2354 */
2355
2356struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2357 const struct sk_buff *skb)
2358{
2359 __u8 tos = RT_FL_TOS(fl4);
2360 struct fib_result res = {
2361 .type = RTN_UNSPEC,
2362 .fi = NULL,
2363 .table = NULL,
2364 .tclassid = 0,
2365 };
2366 struct rtable *rth;
2367
2368 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2369 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2370 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2371 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2372
2373 rcu_read_lock();
2374 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2375 rcu_read_unlock();
2376
2377 return rth;
2378}
2379EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2380
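/* Slow-path output route resolution: validate the source address, pick the
 * output device and source address when they are not given, consult the FIB
 * and finally build the rtable via __mkroute_output().  Must be called with
 * rcu_read_lock() held; the wrapper above takes care of that.
 */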
2381struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2382 struct fib_result *res,
2383 const struct sk_buff *skb)
2384{
2385 struct net_device *dev_out = NULL;
2386 int orig_oif = fl4->flowi4_oif;
2387 unsigned int flags = 0;
2388 struct rtable *rth;
2389 int err;
2390
2391 if (fl4->saddr) {
2392 if (ipv4_is_multicast(fl4->saddr) ||
2393 ipv4_is_lbcast(fl4->saddr) ||
2394 ipv4_is_zeronet(fl4->saddr)) {
2395 rth = ERR_PTR(-EINVAL);
2396 goto out;
2397 }
2398
2399 rth = ERR_PTR(-ENETUNREACH);
2400
2401		/* I removed the check for oif == dev_out->oif here.
2402		   It was wrong for two reasons:
2403		   1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2404		      is assigned to multiple interfaces.
2405		   2. Moreover, we are allowed to send packets with the saddr
2406		      of another iface. --ANK
2407		 */
2408
2409 if (fl4->flowi4_oif == 0 &&
2410 (ipv4_is_multicast(fl4->daddr) ||
2411 ipv4_is_lbcast(fl4->daddr))) {
2412 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2413 dev_out = __ip_dev_find(net, fl4->saddr, false);
2414 if (!dev_out)
2415 goto out;
2416
2417			/* Special hack: the user can direct multicasts
2418			   and limited broadcast via the necessary interface
2419			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2420			   This hack is not just for fun, it allows
2421			   vic, vat and friends to work.
2422			   They bind the socket to loopback, set the ttl to zero
2423			   and expect that it will work.
2424			   From the viewpoint of the routing cache they are broken,
2425			   because we are not allowed to build a multicast path
2426			   with a loopback source addr (look, the routing cache
2427			   cannot know that the ttl is zero, so the packet
2428			   will not leave this host and the route is valid).
2429			   Luckily, this hack is a good workaround.
2430			 */
2431
2432 fl4->flowi4_oif = dev_out->ifindex;
2433 goto make_route;
2434 }
2435
2436 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2437 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2438 if (!__ip_dev_find(net, fl4->saddr, false))
2439 goto out;
2440 }
2441 }
2442
2443
2444 if (fl4->flowi4_oif) {
2445 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2446 rth = ERR_PTR(-ENODEV);
2447 if (!dev_out)
2448 goto out;
2449
2450 /* RACE: Check return value of inet_select_addr instead. */
2451 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2452 rth = ERR_PTR(-ENETUNREACH);
2453 goto out;
2454 }
2455 if (ipv4_is_local_multicast(fl4->daddr) ||
2456 ipv4_is_lbcast(fl4->daddr) ||
2457 fl4->flowi4_proto == IPPROTO_IGMP) {
2458 if (!fl4->saddr)
2459 fl4->saddr = inet_select_addr(dev_out, 0,
2460 RT_SCOPE_LINK);
2461 goto make_route;
2462 }
2463 if (!fl4->saddr) {
2464 if (ipv4_is_multicast(fl4->daddr))
2465 fl4->saddr = inet_select_addr(dev_out, 0,
2466 fl4->flowi4_scope);
2467 else if (!fl4->daddr)
2468 fl4->saddr = inet_select_addr(dev_out, 0,
2469 RT_SCOPE_HOST);
2470 }
2471 }
2472
2473 if (!fl4->daddr) {
2474 fl4->daddr = fl4->saddr;
2475 if (!fl4->daddr)
2476 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2477 dev_out = net->loopback_dev;
2478 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2479 res->type = RTN_LOCAL;
2480 flags |= RTCF_LOCAL;
2481 goto make_route;
2482 }
2483
2484 err = fib_lookup(net, fl4, res, 0);
2485 if (err) {
2486 res->fi = NULL;
2487 res->table = NULL;
2488 if (fl4->flowi4_oif &&
2489 (ipv4_is_multicast(fl4->daddr) ||
2490 !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2491			/* Apparently, the routing tables are wrong. Assume
2492			   that the destination is on-link.
2493
2494			   WHY? DW.
2495			   Because we are allowed to send to an iface
2496			   even if it has NO routes and NO assigned
2497			   addresses. When oif is specified, the routing
2498			   tables are looked up with only one purpose:
2499			   to check whether the destination is gatewayed
2500			   rather than direct. Moreover, if MSG_DONTROUTE is set,
2501			   we send the packet, ignoring both routing tables
2502			   and ifaddr state. --ANK
2503
2504
2505			   We could do this even if oif is unknown,
2506			   as IPv6 likely does, but we do not.
2507			 */
2508
2509 if (fl4->saddr == 0)
2510 fl4->saddr = inet_select_addr(dev_out, 0,
2511 RT_SCOPE_LINK);
2512 res->type = RTN_UNICAST;
2513 goto make_route;
2514 }
2515 rth = ERR_PTR(err);
2516 goto out;
2517 }
2518
2519 if (res->type == RTN_LOCAL) {
2520 if (!fl4->saddr) {
2521 if (res->fi->fib_prefsrc)
2522 fl4->saddr = res->fi->fib_prefsrc;
2523 else
2524 fl4->saddr = fl4->daddr;
2525 }
2526
2527 /* L3 master device is the loopback for that domain */
2528 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2529 net->loopback_dev;
2530
2531 /* make sure orig_oif points to fib result device even
2532 * though packet rx/tx happens over loopback or l3mdev
2533 */
2534 orig_oif = FIB_RES_OIF(*res);
2535
2536 fl4->flowi4_oif = dev_out->ifindex;
2537 flags |= RTCF_LOCAL;
2538 goto make_route;
2539 }
2540
2541 fib_select_path(net, res, fl4, skb);
2542
2543 dev_out = FIB_RES_DEV(*res);
2544 fl4->flowi4_oif = dev_out->ifindex;
2545
2546
2547make_route:
2548 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2549
2550out:
2551 return rth;
2552}
2553
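/* Blackhole dst: the input and output handlers silently discard packets and
 * the metrics can no longer be changed.  ipv4_blackhole_route() below clones
 * an existing rtable into such a dst and releases the original.
 */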
2554static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2555{
2556 return NULL;
2557}
2558
2559static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2560{
2561 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2562
2563 return mtu ? : dst->dev->mtu;
2564}
2565
2566static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2567 struct sk_buff *skb, u32 mtu,
2568 bool confirm_neigh)
2569{
2570}
2571
2572static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2573 struct sk_buff *skb)
2574{
2575}
2576
2577static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2578 unsigned long old)
2579{
2580 return NULL;
2581}
2582
2583static struct dst_ops ipv4_dst_blackhole_ops = {
2584 .family = AF_INET,
2585 .check = ipv4_blackhole_dst_check,
2586 .mtu = ipv4_blackhole_mtu,
2587 .default_advmss = ipv4_default_advmss,
2588 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2589 .redirect = ipv4_rt_blackhole_redirect,
2590 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2591 .neigh_lookup = ipv4_neigh_lookup,
2592};
2593
2594struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2595{
2596 struct rtable *ort = (struct rtable *) dst_orig;
2597 struct rtable *rt;
2598
2599 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2600 if (rt) {
2601 struct dst_entry *new = &rt->dst;
2602
2603 new->__use = 1;
2604 new->input = dst_discard;
2605 new->output = dst_discard_out;
2606
2607 new->dev = net->loopback_dev;
2608 if (new->dev)
2609 dev_hold(new->dev);
2610
2611 rt->rt_is_input = ort->rt_is_input;
2612 rt->rt_iif = ort->rt_iif;
2613 rt->rt_pmtu = ort->rt_pmtu;
2614 rt->rt_mtu_locked = ort->rt_mtu_locked;
2615
2616 rt->rt_genid = rt_genid_ipv4(net);
2617 rt->rt_flags = ort->rt_flags;
2618 rt->rt_type = ort->rt_type;
2619 rt->rt_gateway = ort->rt_gateway;
2620 rt->rt_uses_gateway = ort->rt_uses_gateway;
2621
2622 INIT_LIST_HEAD(&rt->rt_uncached);
2623 }
2624
2625 dst_release(dst_orig);
2626
2627 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2628}
2629
2630struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2631 const struct sock *sk)
2632{
2633 struct rtable *rt = __ip_route_output_key(net, flp4);
2634
2635 if (IS_ERR(rt))
2636 return rt;
2637
2638 if (flp4->flowi4_proto)
2639 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2640 flowi4_to_flowi(flp4),
2641 sk, 0);
2642
2643 return rt;
2644}
2645EXPORT_SYMBOL_GPL(ip_route_output_flow);
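
/* A minimal usage sketch (not taken from this file; net, sk, daddr and saddr
 * are assumed to be in scope): a typical output-path caller fills a flowi4
 * and looks up the route like
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= daddr,
 *		.saddr		= saddr,
 *		.flowi4_proto	= IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 * and drops the reference with ip_rt_put(rt) when it is done with it.
 */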
2646
2647/* called with rcu_read_lock held */
2648static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2649 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2650 struct sk_buff *skb, u32 portid, u32 seq)
2651{
2652 struct rtmsg *r;
2653 struct nlmsghdr *nlh;
2654 unsigned long expires = 0;
2655 u32 error;
2656 u32 metrics[RTAX_MAX];
2657
2658 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
2659 if (!nlh)
2660 return -EMSGSIZE;
2661
2662 r = nlmsg_data(nlh);
2663 r->rtm_family = AF_INET;
2664 r->rtm_dst_len = 32;
2665 r->rtm_src_len = 0;
2666 r->rtm_tos = fl4->flowi4_tos;
2667 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2668 if (nla_put_u32(skb, RTA_TABLE, table_id))
2669 goto nla_put_failure;
2670 r->rtm_type = rt->rt_type;
2671 r->rtm_scope = RT_SCOPE_UNIVERSE;
2672 r->rtm_protocol = RTPROT_UNSPEC;
2673 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2674 if (rt->rt_flags & RTCF_NOTIFY)
2675 r->rtm_flags |= RTM_F_NOTIFY;
2676 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2677 r->rtm_flags |= RTCF_DOREDIRECT;
2678
2679 if (nla_put_in_addr(skb, RTA_DST, dst))
2680 goto nla_put_failure;
2681 if (src) {
2682 r->rtm_src_len = 32;
2683 if (nla_put_in_addr(skb, RTA_SRC, src))
2684 goto nla_put_failure;
2685 }
2686 if (rt->dst.dev &&
2687 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2688 goto nla_put_failure;
2689#ifdef CONFIG_IP_ROUTE_CLASSID
2690 if (rt->dst.tclassid &&
2691 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2692 goto nla_put_failure;
2693#endif
2694 if (!rt_is_input_route(rt) &&
2695 fl4->saddr != src) {
2696 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2697 goto nla_put_failure;
2698 }
2699 if (rt->rt_uses_gateway &&
2700 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
2701 goto nla_put_failure;
2702
2703 expires = rt->dst.expires;
2704 if (expires) {
2705 unsigned long now = jiffies;
2706
2707 if (time_before(now, expires))
2708 expires -= now;
2709 else
2710 expires = 0;
2711 }
2712
2713 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2714 if (rt->rt_pmtu && expires)
2715 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2716 if (rt->rt_mtu_locked && expires)
2717 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2718 if (rtnetlink_put_metrics(skb, metrics) < 0)
2719 goto nla_put_failure;
2720
2721 if (fl4->flowi4_mark &&
2722 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2723 goto nla_put_failure;
2724
2725 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2726 nla_put_u32(skb, RTA_UID,
2727 from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
2728 goto nla_put_failure;
2729
2730 error = rt->dst.error;
2731
2732 if (rt_is_input_route(rt)) {
2733#ifdef CONFIG_IP_MROUTE
2734 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2735 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2736 int err = ipmr_get_route(net, skb,
2737 fl4->saddr, fl4->daddr,
2738 r, portid);
2739
2740 if (err <= 0) {
2741 if (err == 0)
2742 return 0;
2743 goto nla_put_failure;
2744 }
2745 } else
2746#endif
2747 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
2748 goto nla_put_failure;
2749 }
2750
2751 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2752 goto nla_put_failure;
2753
2754 nlmsg_end(skb, nlh);
2755 return 0;
2756
2757nla_put_failure:
2758 nlmsg_cancel(skb, nlh);
2759 return -EMSGSIZE;
2760}
2761
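/* Build a dummy skb carrying an IPv4 header plus a minimal UDP, TCP or ICMP
 * header, so that an RTM_GETROUTE request can be pushed through the real
 * input/output routing code, including early flow dissection for the
 * multipath hash.
 */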
2762static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
2763 u8 ip_proto, __be16 sport,
2764 __be16 dport)
2765{
2766 struct sk_buff *skb;
2767 struct iphdr *iph;
2768
2769 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2770 if (!skb)
2771 return NULL;
2772
2773	/* Reserve room for dummy headers; this skb can pass
2774	 * through a good chunk of the routing engine.
2775	 */
2776 skb_reset_mac_header(skb);
2777 skb_reset_network_header(skb);
2778 skb->protocol = htons(ETH_P_IP);
2779 iph = skb_put(skb, sizeof(struct iphdr));
2780 iph->protocol = ip_proto;
2781 iph->saddr = src;
2782 iph->daddr = dst;
2783 iph->version = 0x4;
2784 iph->frag_off = 0;
2785 iph->ihl = 0x5;
2786 skb_set_transport_header(skb, skb->len);
2787
2788 switch (iph->protocol) {
2789 case IPPROTO_UDP: {
2790 struct udphdr *udph;
2791
2792 udph = skb_put_zero(skb, sizeof(struct udphdr));
2793 udph->source = sport;
2794 udph->dest = dport;
2795 udph->len = sizeof(struct udphdr);
2796 udph->check = 0;
2797 break;
2798 }
2799 case IPPROTO_TCP: {
2800 struct tcphdr *tcph;
2801
2802 tcph = skb_put_zero(skb, sizeof(struct tcphdr));
2803 tcph->source = sport;
2804 tcph->dest = dport;
2805 tcph->doff = sizeof(struct tcphdr) / 4;
2806 tcph->rst = 1;
2807 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
2808 src, dst, 0);
2809 break;
2810 }
2811 case IPPROTO_ICMP: {
2812 struct icmphdr *icmph;
2813
2814 icmph = skb_put_zero(skb, sizeof(struct icmphdr));
2815 icmph->type = ICMP_ECHO;
2816 icmph->code = 0;
2817 }
2818 }
2819
2820 return skb;
2821}
2822
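/* RTM_GETROUTE handler: resolve a single route for the attributes carried in
 * the netlink request and return it as an RTM_NEWROUTE message.  This is the
 * kernel side of, for example, "ip route get <address>".
 */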
2823static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2824 struct netlink_ext_ack *extack)
2825{
2826 struct net *net = sock_net(in_skb->sk);
2827 struct nlattr *tb[RTA_MAX+1];
2828 u32 table_id = RT_TABLE_MAIN;
2829 __be16 sport = 0, dport = 0;
2830 struct fib_result res = {};
2831 u8 ip_proto = IPPROTO_UDP;
2832 struct rtable *rt = NULL;
2833 struct sk_buff *skb;
2834 struct rtmsg *rtm;
2835 struct flowi4 fl4;
2836 __be32 dst = 0;
2837 __be32 src = 0;
2838 kuid_t uid;
2839 u32 iif;
2840 int err;
2841 int mark;
2842
2843 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
2844 extack);
2845 if (err < 0)
2846 return err;
2847
2848 rtm = nlmsg_data(nlh);
2849 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2850 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2851 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2852 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2853 if (tb[RTA_UID])
2854 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
2855 else
2856 uid = (iif ? INVALID_UID : current_uid());
2857
2858 if (tb[RTA_IP_PROTO]) {
2859 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
2860 &ip_proto, AF_INET, extack);
2861 if (err)
2862 return err;
2863 }
2864
2865 if (tb[RTA_SPORT])
2866 sport = nla_get_be16(tb[RTA_SPORT]);
2867
2868 if (tb[RTA_DPORT])
2869 dport = nla_get_be16(tb[RTA_DPORT]);
2870
2871 skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
2872 if (!skb)
2873 return -ENOBUFS;
2874
2875 memset(&fl4, 0, sizeof(fl4));
2876 fl4.daddr = dst;
2877 fl4.saddr = src;
2878 fl4.flowi4_tos = rtm->rtm_tos;
2879 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2880 fl4.flowi4_mark = mark;
2881 fl4.flowi4_uid = uid;
2882 if (sport)
2883 fl4.fl4_sport = sport;
2884 if (dport)
2885 fl4.fl4_dport = dport;
2886 fl4.flowi4_proto = ip_proto;
2887
2888 rcu_read_lock();
2889
2890 if (iif) {
2891 struct net_device *dev;
2892
2893 dev = dev_get_by_index_rcu(net, iif);
2894 if (!dev) {
2895 err = -ENODEV;
2896 goto errout_rcu;
2897 }
2898
2899 fl4.flowi4_iif = iif; /* for rt_fill_info */
2900 skb->dev = dev;
2901 skb->mark = mark;
2902 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
2903 dev, &res);
2904
2905 rt = skb_rtable(skb);
2906 if (err == 0 && rt->dst.error)
2907 err = -rt->dst.error;
2908 } else {
2909 fl4.flowi4_iif = LOOPBACK_IFINDEX;
2910 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
2911 err = 0;
2912 if (IS_ERR(rt))
2913 err = PTR_ERR(rt);
2914 else
2915 skb_dst_set(skb, &rt->dst);
2916 }
2917
2918 if (err)
2919 goto errout_rcu;
2920
2921 if (rtm->rtm_flags & RTM_F_NOTIFY)
2922 rt->rt_flags |= RTCF_NOTIFY;
2923
2924 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
2925 table_id = res.table ? res.table->tb_id : 0;
2926
2927 /* reset skb for netlink reply msg */
2928 skb_trim(skb, 0);
2929 skb_reset_network_header(skb);
2930 skb_reset_transport_header(skb);
2931 skb_reset_mac_header(skb);
2932
2933 if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
2934 if (!res.fi) {
2935 err = fib_props[res.type].error;
2936 if (!err)
2937 err = -EHOSTUNREACH;
2938 goto errout_rcu;
2939 }
2940 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
2941 nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
2942 rt->rt_type, res.prefix, res.prefixlen,
2943 fl4.flowi4_tos, res.fi, 0);
2944 } else {
2945 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
2946 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
2947 }
2948 if (err < 0)
2949 goto errout_rcu;
2950
2951 rcu_read_unlock();
2952
2953 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2954
2955errout_free:
2956 return err;
2957errout_rcu:
2958 rcu_read_unlock();
2959 kfree_skb(skb);
2960 goto errout_free;
2961}
2962
2963void ip_rt_multicast_event(struct in_device *in_dev)
2964{
2965 rt_cache_flush(dev_net(in_dev->dev));
2966}
2967
2968#ifdef CONFIG_SYSCTL
2969static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2970static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2971static int ip_rt_gc_elasticity __read_mostly = 8;
2972static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
2973
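/* Writing any value to this sysctl flushes the route cache and bumps the
 * per-netns fnhe genid, invalidating cached exceptions; reads are rejected.
 * From userspace (a usage sketch):
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 */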
2974static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2975 void __user *buffer,
2976 size_t *lenp, loff_t *ppos)
2977{
2978 struct net *net = (struct net *)__ctl->extra1;
2979
2980 if (write) {
2981 rt_cache_flush(net);
2982 fnhe_genid_bump(net);
2983 return 0;
2984 }
2985
2986 return -EINVAL;
2987}
2988
2989static struct ctl_table ipv4_route_table[] = {
2990 {
2991 .procname = "gc_thresh",
2992 .data = &ipv4_dst_ops.gc_thresh,
2993 .maxlen = sizeof(int),
2994 .mode = 0644,
2995 .proc_handler = proc_dointvec,
2996 },
2997 {
2998 .procname = "max_size",
2999 .data = &ip_rt_max_size,
3000 .maxlen = sizeof(int),
3001 .mode = 0644,
3002 .proc_handler = proc_dointvec,
3003 },
3004 {
3005 /* Deprecated. Use gc_min_interval_ms */
3006
3007 .procname = "gc_min_interval",
3008 .data = &ip_rt_gc_min_interval,
3009 .maxlen = sizeof(int),
3010 .mode = 0644,
3011 .proc_handler = proc_dointvec_jiffies,
3012 },
3013 {
3014 .procname = "gc_min_interval_ms",
3015 .data = &ip_rt_gc_min_interval,
3016 .maxlen = sizeof(int),
3017 .mode = 0644,
3018 .proc_handler = proc_dointvec_ms_jiffies,
3019 },
3020 {
3021 .procname = "gc_timeout",
3022 .data = &ip_rt_gc_timeout,
3023 .maxlen = sizeof(int),
3024 .mode = 0644,
3025 .proc_handler = proc_dointvec_jiffies,
3026 },
3027 {
3028 .procname = "gc_interval",
3029 .data = &ip_rt_gc_interval,
3030 .maxlen = sizeof(int),
3031 .mode = 0644,
3032 .proc_handler = proc_dointvec_jiffies,
3033 },
3034 {
3035 .procname = "redirect_load",
3036 .data = &ip_rt_redirect_load,
3037 .maxlen = sizeof(int),
3038 .mode = 0644,
3039 .proc_handler = proc_dointvec,
3040 },
3041 {
3042 .procname = "redirect_number",
3043 .data = &ip_rt_redirect_number,
3044 .maxlen = sizeof(int),
3045 .mode = 0644,
3046 .proc_handler = proc_dointvec,
3047 },
3048 {
3049 .procname = "redirect_silence",
3050 .data = &ip_rt_redirect_silence,
3051 .maxlen = sizeof(int),
3052 .mode = 0644,
3053 .proc_handler = proc_dointvec,
3054 },
3055 {
3056 .procname = "error_cost",
3057 .data = &ip_rt_error_cost,
3058 .maxlen = sizeof(int),
3059 .mode = 0644,
3060 .proc_handler = proc_dointvec,
3061 },
3062 {
3063 .procname = "error_burst",
3064 .data = &ip_rt_error_burst,
3065 .maxlen = sizeof(int),
3066 .mode = 0644,
3067 .proc_handler = proc_dointvec,
3068 },
3069 {
3070 .procname = "gc_elasticity",
3071 .data = &ip_rt_gc_elasticity,
3072 .maxlen = sizeof(int),
3073 .mode = 0644,
3074 .proc_handler = proc_dointvec,
3075 },
3076 {
3077 .procname = "mtu_expires",
3078 .data = &ip_rt_mtu_expires,
3079 .maxlen = sizeof(int),
3080 .mode = 0644,
3081 .proc_handler = proc_dointvec_jiffies,
3082 },
3083 {
3084 .procname = "min_pmtu",
3085 .data = &ip_rt_min_pmtu,
3086 .maxlen = sizeof(int),
3087 .mode = 0644,
3088 .proc_handler = proc_dointvec_minmax,
3089 .extra1 = &ip_min_valid_pmtu,
3090 },
3091 {
3092 .procname = "min_adv_mss",
3093 .data = &ip_rt_min_advmss,
3094 .maxlen = sizeof(int),
3095 .mode = 0644,
3096 .proc_handler = proc_dointvec,
3097 },
3098 { }
3099};
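
/* The entries above appear under /proc/sys/net/ipv4/route/, e.g.
 * (a usage sketch):
 *
 *	sysctl -w net.ipv4.route.min_pmtu=552
 *
 * min_pmtu is clamped to at least IPV4_MIN_MTU via ip_min_valid_pmtu.
 */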
3100
3101static struct ctl_table ipv4_route_flush_table[] = {
3102 {
3103 .procname = "flush",
3104 .maxlen = sizeof(int),
3105 .mode = 0200,
3106 .proc_handler = ipv4_sysctl_rtcache_flush,
3107 },
3108 { },
3109};
3110
3111static __net_init int sysctl_route_net_init(struct net *net)
3112{
3113 struct ctl_table *tbl;
3114
3115 tbl = ipv4_route_flush_table;
3116 if (!net_eq(net, &init_net)) {
3117 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3118 if (!tbl)
3119 goto err_dup;
3120
3121 /* Don't export sysctls to unprivileged users */
3122 if (net->user_ns != &init_user_ns)
3123 tbl[0].procname = NULL;
3124 }
3125 tbl[0].extra1 = net;
3126
3127 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3128 if (!net->ipv4.route_hdr)
3129 goto err_reg;
3130 return 0;
3131
3132err_reg:
3133 if (tbl != ipv4_route_flush_table)
3134 kfree(tbl);
3135err_dup:
3136 return -ENOMEM;
3137}
3138
3139static __net_exit void sysctl_route_net_exit(struct net *net)
3140{
3141 struct ctl_table *tbl;
3142
3143 tbl = net->ipv4.route_hdr->ctl_table_arg;
3144 unregister_net_sysctl_table(net->ipv4.route_hdr);
3145 BUG_ON(tbl == ipv4_route_flush_table);
3146 kfree(tbl);
3147}
3148
3149static __net_initdata struct pernet_operations sysctl_route_ops = {
3150 .init = sysctl_route_net_init,
3151 .exit = sysctl_route_net_exit,
3152};
3153#endif
3154
3155static __net_init int rt_genid_init(struct net *net)
3156{
3157 atomic_set(&net->ipv4.rt_genid, 0);
3158 atomic_set(&net->fnhe_genid, 0);
3159 atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3160 return 0;
3161}
3162
3163static __net_initdata struct pernet_operations rt_genid_ops = {
3164 .init = rt_genid_init,
3165};
3166
3167static int __net_init ipv4_inetpeer_init(struct net *net)
3168{
3169 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3170
3171 if (!bp)
3172 return -ENOMEM;
3173 inet_peer_base_init(bp);
3174 net->ipv4.peers = bp;
3175 return 0;
3176}
3177
3178static void __net_exit ipv4_inetpeer_exit(struct net *net)
3179{
3180 struct inet_peer_base *bp = net->ipv4.peers;
3181
3182 net->ipv4.peers = NULL;
3183 inetpeer_invalidate_tree(bp);
3184 kfree(bp);
3185}
3186
3187static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3188 .init = ipv4_inetpeer_init,
3189 .exit = ipv4_inetpeer_exit,
3190};
3191
3192#ifdef CONFIG_IP_ROUTE_CLASSID
3193struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3194#endif /* CONFIG_IP_ROUTE_CLASSID */
3195
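/* Boot-time initialisation of the IPv4 routing code: allocate the IP ID and
 * timestamp arrays, the per-cpu uncached route lists and the dst caches, then
 * register the /proc files, the RTM_GETROUTE handler and the per-netns ops.
 */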
3196int __init ip_rt_init(void)
3197{
3198 int cpu;
3199
3200 ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
3201 GFP_KERNEL);
3202 if (!ip_idents)
3203 panic("IP: failed to allocate ip_idents\n");
3204
3205 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
3206
3207 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
3208 if (!ip_tstamps)
3209 panic("IP: failed to allocate ip_tstamps\n");
3210
3211 for_each_possible_cpu(cpu) {
3212 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3213
3214 INIT_LIST_HEAD(&ul->head);
3215 spin_lock_init(&ul->lock);
3216 }
3217#ifdef CONFIG_IP_ROUTE_CLASSID
3218 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3219 if (!ip_rt_acct)
3220 panic("IP: failed to allocate ip_rt_acct\n");
3221#endif
3222
3223 ipv4_dst_ops.kmem_cachep =
3224 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3225 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3226
3227 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3228
3229 if (dst_entries_init(&ipv4_dst_ops) < 0)
3230 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3231
3232 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3233 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3234
3235 ipv4_dst_ops.gc_thresh = ~0;
3236 ip_rt_max_size = INT_MAX;
3237
3238 devinet_init();
3239 ip_fib_init();
3240
3241 if (ip_rt_proc_init())
3242 pr_err("Unable to create route proc files\n");
3243#ifdef CONFIG_XFRM
3244 xfrm_init();
3245 xfrm4_init();
3246#endif
3247 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3248 RTNL_FLAG_DOIT_UNLOCKED);
3249
3250#ifdef CONFIG_SYSCTL
3251 register_pernet_subsys(&sysctl_route_ops);
3252#endif
3253 register_pernet_subsys(&rt_genid_ops);
3254 register_pernet_subsys(&ipv4_inetpeer_ops);
3255 return 0;
3256}
3257
3258#ifdef CONFIG_SYSCTL
3259/*
3260 * We really need to sanitize the damn ipv4 init order, then all
3261 * this nonsense will go away.
3262 */
3263void __init ip_static_sysctl_init(void)
3264{
3265 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3266}
3267#endif