// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

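/* Queue and stack maps share one backing store: a ring buffer of
 * max_entries + 1 fixed-size slots. head is the slot the next push writes,
 * tail is the oldest element still stored; one slot is kept unused so that
 * head == tail unambiguously means "empty".
 */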
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[0] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

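/* The map is treated as full when advancing head would make it collide with
 * tail, which is why one slot of the buffer is never used.
 */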
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem = {0};
	struct bpf_queue_stack *qs;
	u64 size, queue_size, cost;

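	/* Allocate one slot more than max_entries so that a full map can be
	 * told apart from an empty one (see queue_stack_map_is_full()).
	 */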
	size = (u64) attr->max_entries + 1;
	cost = queue_size = sizeof(*qs) + size * attr->value_size;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	bpf_map_charge_move(&qs->map.memory, &mem);
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	bpf_map_area_free(qs);
}

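/* Queue (FIFO) peek/pop: read the oldest element at tail; when deleting,
 * advance tail with wraparound.
 */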
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

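/* Stack (LIFO) peek/pop: read the most recently pushed element at head - 1
 * (wrapping to the last slot when head is 0); when deleting, move head back
 * to that index.
 */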
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

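/* The peek and pop callbacks below differ only in whether the element is
 * removed after it has been copied out, which is what the 'delete' argument
 * of the helpers above controls.
 */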
/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, irq_flags);
	}

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

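	/* Store the new element at head and advance head with wraparound. */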
	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

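/* Queue and stack maps have no keys, so the generic per-key operations below
 * are stubs: lookup finds nothing, and update/delete/get_next_key fail with
 * -EINVAL.
 */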
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

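/* The two map types share every callback except pop and peek, which select
 * FIFO (queue) or LIFO (stack) behaviour.
 */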
const struct bpf_map_ops queue_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};

const struct bpf_map_ops stack_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};