/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-lived
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that happens, a node may be removed when sufficient time has passed
 *  since its last use.  A less-recently-used entry can also be removed if the
 *  pool is overloaded, i.e. if the total number of entries is greater than or
 *  equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-lived nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  The per-base entry count (base->total) is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		daddr: unchangeable
 */
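
/* A minimal usage sketch (illustrative, not part of this file), following the
 * rules above: inet_getpeer() returns a referenced node, and the caller must
 * drop that reference with inet_putpeer().  The base pointer and destination
 * address below are hypothetical.
 *
 *	struct inetpeer_addr a;
 *	struct inet_peer *peer;
 *
 *	inetpeer_set_addr_v4(&a, dst_ip);	// dst_ip: hypothetical __be32
 *	peer = inet_getpeer(base, &a, 1);	// 1 = create if not found
 *	if (peer) {
 *		// ... use the long-lived per-peer fields ...
 *		inet_putpeer(peer);		// release our reference
 *	}
 */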

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
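
	/* A worked example (illustrative): the checks above are cumulative,
	 * so a machine with 8MB of RAM matches all three and ends up with
	 * (65536 + 128) >> 4 = 4104 entries, while one with 24MB matches the
	 * first two and keeps (65536 + 128) >> 2 = 16416 entries.
	 */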

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
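		/* On the write-locked path (gc_stack != NULL), remember each
		 * node visited as a GC candidate.  On the lockless path, abort
		 * the walk if a concurrent writer changed the tree under us.
		 */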
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* perform garbage collection on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
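	/* The else branch linearly interpolates the TTL between
	 * inet_peer_maxttl for an empty pool and roughly inet_peer_minttl as
	 * base->total approaches the threshold.  E.g. with the defaults
	 * (minttl 120s, maxttl 600s), a pool at half the threshold gets a
	 * TTL of about 360 seconds.
	 */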
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, this time taking the lock first.
	 * At least the nodes touched above should now be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 2);
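			/* 2 = one reference held by the tree (the one that
			 * refcount_dec_if_one() in inet_peer_gc() drops) plus
			 * one returned to the caller, dropped later via
			 * inet_putpeer().
			 */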
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough that the
			 * first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is now held in the inet_peer entries.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are rate-limited
 *	twice: by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
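
/* A minimal caller sketch (illustrative; modelled on the ICMP rate-limit
 * usage, with send_the_icmp_error() a hypothetical helper): tokens accrue at
 * one per jiffy, each transmission costs 'timeout' tokens, and bursts are
 * capped at XRLIM_BURST_FACTOR * timeout.
 *
 *	// Allow at most one ICMP error per 'sysctl_icmp_ratelimit' jiffies
 *	// to this peer, with a burst of up to XRLIM_BURST_FACTOR messages.
 *	if (inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit))
 *		send_the_icmp_error();
 */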

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);