/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use a perfect hash if (mask >> shift) is below this */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

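/*
 * Filters are stored in one of two ways: a "perfect" hash, a flat array
 * indexed directly by key, used when (mask >> shift) is small enough; or
 * an "imperfect" hash, a table of RCU-protected collision chains indexed
 * by key % hash. The lookup path consults the perfect hash first and
 * falls back to the chained table.
 */
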
struct tcindex_filter_result {
	struct tcf_exts exts;
	struct tcf_result res;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; NULL if none */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_head rcu;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

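/*
 * Look up the result for @key: a direct array index in the perfect hash,
 * otherwise a walk of the key % hash collision chain. Runs under RCU
 * (softirq) from the classify path and under RTNL from control paths,
 * hence rcu_dereference_bh_rtnl().
 */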
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

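/*
 * Classify by extracting the key from skb->tc_index:
 * key = (tc_index & mask) >> shift. If no filter matches and fall_through
 * is set, synthesize a classid from the qdisc handle's major number and
 * the key instead of failing the lookup.
 */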
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}

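/* In tcindex, a filter's handle is simply its key. */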
static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

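/*
 * Extension teardown must run with RTNL held, but the object may be
 * released from an RCU callback (atomic context). The destroy paths
 * therefore bounce through tcf_queue_work(): the RCU callback only queues
 * the work item, and the work handler takes rtnl_lock() before freeing.
 */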
static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(work, struct tcindex_filter_result, work);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void tcindex_destroy_rexts(struct rcu_head *head)
{
	struct tcindex_filter_result *r;

	r = container_of(head, struct tcindex_filter_result, rcu);
	INIT_WORK(&r->work, tcindex_destroy_rexts_work);
	tcf_queue_work(&r->work);
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(work, struct tcindex_filter,
						work);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static void tcindex_destroy_fexts(struct rcu_head *head)
{
	struct tcindex_filter *f = container_of(head, struct tcindex_filter,
						rcu);

	INIT_WORK(&f->work, tcindex_destroy_fexts_work);
	tcf_queue_work(&f->work);
}

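/*
 * Delete a filter. In perfect-hash mode the slot stays allocated and is
 * merely emptied; in imperfect mode the entry is unlinked from its chain
 * and freed after a grace period.
 */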
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			call_rcu(&f->rcu, tcindex_destroy_fexts);
		else
			__tcindex_destroy_fexts(f);
	} else {
		if (tcf_exts_get_net(&r->exts))
			call_rcu(&r->rcu, tcindex_destroy_rexts);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static int tcindex_destroy_element(struct tcf_proto *tp,
				   void *arg, struct tcf_walker *walker)
{
	bool last;

	return tcindex_delete(tp, arg, &last);
}

static void __tcindex_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

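/*
 * The largest possible key is mask >> shift, so a perfect hash is valid
 * only if the table has more entries than that maximum key value.
 */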
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

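/*
 * Free only the parts of the old tcindex_data that were replaced by an
 * update: the imperfect hash table (h) is carried over into the new copy
 * by tcindex_set_parms() and must not be freed here.
 */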
static void __tcindex_partial_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

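/*
 * Create or update a filter. Works copy-on-update: a new tcindex_data is
 * built from the old one plus the netlink attributes, then swapped in
 * with rcu_assign_pointer() so concurrent lookups always see a consistent
 * configuration. balloc records what this call allocated (1: perfect
 * hash, 2: imperfect hash) so the error path frees only that.
 */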
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;

	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

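	/* In perfect mode the slot is addressed directly; otherwise reuse an
	 * existing chain entry for this handle, or fall back to the
	 * new_filter_result template (GNU "?:"), which below triggers
	 * allocation of a fresh tcindex_filter.
	 */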
	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

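/*
 * Parse TCA_OPTIONS against the tcindex netlink policy and hand the
 * attributes to tcindex_set_parms(); a missing options attribute is a
 * no-op, not an error.
 */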
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr);
}

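/*
 * Iterate over all set filters, honouring the walker's skip/count
 * protocol: entries below walker->skip are passed over, and a negative
 * return from walker->fn() stops the walk.
 */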
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

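/*
 * Tear down the whole classifier: reuse the walker to delete every
 * element, then free the table itself after an RCU grace period.
 */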
static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = tcindex_destroy_element;
	tcindex_walk(tp, &walker);

	call_rcu(&p->rcu, __tcindex_destroy);
}

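/*
 * Dump either the per-protocol parameters (fh == NULL: hash size, mask,
 * shift, fall_through) or a single filter. For a filter in imperfect
 * mode, the handle has to be recovered by searching the chains for the
 * matching result.
 */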
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid)
		r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.bind_class	= tcindex_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");