// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                free_percpu(array->pptrs[i]);
                cond_resched();
        }
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

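        /* Editor's note: max_entries can be large; one element is allocated
         * per iteration with a cond_resched() in between so a big map does
         * not hog the CPU. The 8-byte alignment matches the
         * round_up(value_size, 8) used to compute elem_size.
         */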
        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
                cond_resched();
        }

        return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 ||
            attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
            !bpf_map_flags_access_ok(attr->map_flags) ||
            (percpu && numa_node != NUMA_NO_NODE))
                return -EINVAL;

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return -E2BIG;
        /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
        if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
                return -E2BIG;

        return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int ret, numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
        bool unpriv = !capable(CAP_SYS_ADMIN);
        u64 cost, array_size, mask64;
        struct bpf_map_memory mem;
        struct bpf_array *array;

        elem_size = round_up(attr->value_size, 8);

        max_entries = attr->max_entries;

        /* On 32 bit archs roundup_pow_of_two() with max_entries that has
         * the uppermost bit set in u32 space is undefined behavior due to
         * the resulting 1U << 32, so do it manually here in u64 space.
         */
        mask64 = fls_long(max_entries - 1);
        mask64 = 1ULL << mask64;
        mask64 -= 1;
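        /* e.g. max_entries == 5: fls_long(4) == 3, so mask64 == (1 << 3) - 1
         * == 7; an unprivileged map is then rounded up to 8 entries below.
         */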

        index_mask = mask64;
        if (unpriv) {
                /* round up array size to nearest power of 2,
                 * since cpu will speculate within index_mask limits
                 */
                max_entries = index_mask + 1;
                /* Check for overflows. */
                if (max_entries < attr->max_entries)
                        return ERR_PTR(-E2BIG);
        }

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) max_entries * sizeof(void *);
        else
                array_size += (u64) max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        cost = array_size;
        if (percpu)
                cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

        ret = bpf_map_charge_init(&mem, cost);
        if (ret < 0)
                return ERR_PTR(ret);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array) {
                bpf_map_charge_finish(&mem);
                return ERR_PTR(-ENOMEM);
        }
        array->index_mask = index_mask;
        array->map.unpriv_array = unpriv;

        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);
        bpf_map_charge_move(&array->map.memory, &mem);
        array->elem_size = elem_size;

        if (percpu && bpf_array_alloc_percpu(array)) {
                bpf_map_charge_finish(&array->map.memory);
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }

        return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

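        /* The index was just bounds-checked, but for unprivileged maps the
         * extra AND with index_mask also bounds speculative access
         * (Spectre v1); for privileged maps index_mask covers all valid
         * indices, so the AND leaves in-range values unchanged.
         */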
        return array->value + array->elem_size * (index & array->index_mask);
}

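/* Direct value access is only supported for single-element arrays (as used
 * for .bss/.data/.rodata global-data maps): with exactly one element the
 * verifier can translate a map value pointer into the constant address
 * 'array->value', which the two helpers below compute and reverse.
 */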
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
                                       u32 off)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        if (map->max_entries != 1)
                return -ENOTSUPP;
        if (off >= map->value_size)
                return -EINVAL;

        *imm = (unsigned long)array->value;
        return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
                                       u32 *off)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u64 base = (unsigned long)array->value;
        u64 range = array->elem_size;

        if (map->max_entries != 1)
                return -ENOTSUPP;
        if (imm < base || imm >= base + range)
                return -ENOENT;

        *off = imm - base;
        return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;
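        /* Roughly (an illustrative sketch, not the literal emitted code):
         *
         *      elem = &array->value;
         *      idx = *(u32 *)key;
         *      if (idx >= max_entries)
         *              return NULL;
         *      if (unpriv)
         *              idx &= index_mask;      // bound speculation
         *      return elem + idx * elem_size;
         *
         * Both BPF_JGE offsets below (4 with the extra AND, 3 without)
         * land on the trailing 'ret = 0' instruction.
         */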
        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
        }

        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

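        /* The destination buffer is expected to hold
         * num_possible_cpus() * round_up(value_size, 8) bytes; one rounded
         * value is copied out per possible CPU, in for_each_possible_cpu()
         * order.
         */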
        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall */
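/* A NULL or out-of-range key restarts iteration at index 0; the last valid
 * index returns -ENOENT to signal the end of the map.
 */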
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        char *val;

        if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags & BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (unlikely((map_flags & BPF_F_LOCK) &&
                     !map_value_has_spin_lock(map)))
                return -EINVAL;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        } else {
                val = array->value +
                        array->elem_size * (index & array->index_mask);
                if (map_flags & BPF_F_LOCK)
                        copy_map_value_locked(map, val, value, false);
                else
                        copy_map_value(map, val, value);
        }
        return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* user space provides round_up(value_size, 8) bytes that are
         * copied into the per-cpu area. bpf programs can only access
         * value_size of it. During lookup the same extra bytes are
         * returned, or zeros that percpu_alloc zero-filled, so no
         * kernel data can leak.
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
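        /* all elements are pre-allocated; they cannot be removed, only
         * overwritten, so delete is not supported
         */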
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (there can be more than one that used this map)
         * have been disconnected from events. Wait for outstanding programs
         * to complete and free the array
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
                                    struct seq_file *m)
{
        void *value;

        rcu_read_lock();

        value = array_map_lookup_elem(map, key);
        if (!value) {
                rcu_read_unlock();
                return;
        }

        if (map->btf_key_type_id)
                seq_printf(m, "%u: ", *(u32 *)key);
        btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
        seq_puts(m, "\n");

        rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
                                           struct seq_file *m)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu;

        rcu_read_lock();

        seq_printf(m, "%u: {\n", *(u32 *)key);
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                seq_printf(m, "\tcpu%d: ", cpu);
                btf_type_seq_show(map->btf, map->btf_value_type_id,
                                  per_cpu_ptr(pptr, cpu), m);
                seq_puts(m, "\n");
        }
        seq_puts(m, "}\n");

        rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
                               const struct btf *btf,
                               const struct btf_type *key_type,
                               const struct btf_type *value_type)
{
        u32 int_data;

        /* One exception for keyless BTF: .bss/.data/.rodata map */
        if (btf_type_is_void(key_type)) {
                if (map->map_type != BPF_MAP_TYPE_ARRAY ||
                    map->max_entries != 1)
                        return -EINVAL;

                if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
                        return -EINVAL;

                return 0;
        }

        if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
                return -EINVAL;

        int_data = *(u32 *)(key_type + 1);
        /* bpf array can only take a u32 key. This check makes sure
         * that the btf matches the attr used during map_create.
         */
        if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
                return -EINVAL;

        return 0;
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
        .map_direct_value_addr = array_map_direct_value_addr,
        .map_direct_value_meta = array_map_direct_value_meta,
        .map_seq_show_elem = array_map_seq_show_elem,
        .map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_seq_show_elem = percpu_array_map_seq_show_elem,
        .map_check_btf = array_map_check_btf,
};

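/* The fd-based array map flavours below (prog_array, perf_event_array,
 * cgroup_array, array_of_maps) store kernel object pointers in
 * array->ptrs[]. From user space each value is written as a u32 file
 * descriptor, which map_fd_get_ptr() translates into the corresponding
 * object pointer.
 */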
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return -EINVAL;
        /* Program read-only/write-only not supported for special maps yet. */
        if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
                return -EINVAL;
        return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

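        /* Publish the new pointer with xchg() so that concurrent lookups
         * observe either the old or the new object; the old object is
         * released via map_fd_put_ptr(), which defers the actual free
         * past an RCU grace period.
         */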
        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(map, old_ptr, true);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(map, old_ptr, true);
                return 0;
        } else {
                return -ENOENT;
        }
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
        /* bpf_prog is freed after one RCU or tasks trace grace period */
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
                                         struct seq_file *m)
{
        void **elem, *ptr;
        u32 prog_id;

        rcu_read_lock();

        elem = array_map_lookup_elem(map, key);
        if (elem) {
                ptr = READ_ONCE(*elem);
                if (ptr) {
                        seq_printf(m, "%u: ", *(u32 *)key);
                        prog_id = prog_fd_array_sys_lookup_elem(ptr);
                        btf_type_seq_show(map->btf, map->btf_value_type_id,
                                          &prog_id, m);
                        seq_puts(m, "\n");
                }
        }

        rcu_read_unlock();
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
        .map_release_uref = bpf_fd_array_map_clear,
        .map_seq_show_elem = prog_array_map_seq_show_elem,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

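        /* Probe the event with a local read: if perf_event_read_local()
         * reports -EOPNOTSUPP, the event apparently cannot be read from
         * BPF (e.g. via bpf_perf_event_read()), so storing it in the map
         * is refused.
         */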
        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
        /* bpf_perf_event is freed after one RCU grace period */
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
        .map_check_btf = map_check_no_btf,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
        /* cgroup_put() frees the cgroup after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
        .map_check_btf = map_check_no_btf,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}

static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed by syscall which
         * is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

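        /* Same bounds-checked address computation as array_map_gen_lookup(),
         * plus one extra BPF_DW load and NULL check because the element
         * stores a pointer to the inner map; the BPF_JGE offsets therefore
         * grow from 4/3 to 6/5, still landing on the trailing 'ret = 0'.
         */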
        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        }
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);

        return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = array_of_map_gen_lookup,
        .map_check_btf = map_check_no_btf,
};