// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

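/* Add an XDP socket to the umem's socket list. Writers are serialized by
 * xsk_list_lock; readers traverse the list under RCU.
 */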
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

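/* Remove an XDP socket from the umem's socket list under the list lock. */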
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

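/* Ask the driver whether a umem is already attached to this queue. Returns
 * a negative errno if the ndo_bpf call fails, a positive value if a umem is
 * already present, and zero otherwise. Caller must hold the rtnl lock.
 */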
int xdp_umem_query(struct net_device *dev, u16 queue_id)
{
	struct netdev_bpf bpf;

	ASSERT_RTNL();

	memset(&bpf, 0, sizeof(bpf));
	bpf.command = XDP_QUERY_XSK_UMEM;
	bpf.xsk.queue_id = queue_id;

	if (!dev->netdev_ops->ndo_bpf)
		return 0;
	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
}

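/* Try to bind the umem to a device and queue in zero-copy mode. XDP_COPY
 * forces copy mode. Otherwise, if the driver lacks the required ndo
 * operations or the queue already has a umem, fall back to copy mode,
 * unless XDP_ZEROCOPY was requested, in which case fail.
 */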
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u32 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err;

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (force_copy)
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
		return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

	bpf.command = XDP_QUERY_XSK_UMEM;

	rtnl_lock();
	err = xdp_umem_query(dev, queue_id);
	if (err) {
		err = err < 0 ? -EOPNOTSUPP : -EBUSY;
		goto err_rtnl_unlock;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_rtnl_unlock;
	rtnl_unlock();

	dev_hold(dev);
	umem->dev = dev;
	umem->queue_id = queue_id;
	umem->zc = true;
	return 0;

err_rtnl_unlock:
	rtnl_unlock();
	return force_zc ? err : 0; /* fail or fallback */
}

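/* Detach the umem from its device: tell the driver to drop the umem for the
 * queue and release the device reference taken in xdp_umem_assign_dev().
 */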
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	if (umem->dev) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		rtnl_lock();
		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		rtnl_unlock();

		if (err)
			WARN(1, "failed to disable umem!\n");

		dev_put(umem->dev);
		umem->dev = NULL;
	}
}

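/* Release the user pages pinned by xdp_umem_pin_pages(); they were pinned
 * writable, so mark them dirty before putting them.
 */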
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

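/* Undo the locked-memory accounting charged in xdp_umem_account_pages(). */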
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

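/* Tear down a umem once the last reference is gone: detach it from the
 * device, destroy the fill and completion queues, unpin and unaccount the
 * user pages, and finally free the umem itself.
 */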
static void xdp_umem_release(struct xdp_umem *umem)
{
	xdp_umem_clear_dev(umem);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xdp_umem_unpin_pages(umem);

	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

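/* Work item that performs the actual release in process context. */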
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

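/* Take a reference on the umem. */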
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

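/* Drop a reference. The final put defers the release to a workqueue, since
 * tearing the umem down takes the rtnl lock and may sleep.
 */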
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

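/* Pin the pages backing the user memory area with get_user_pages() so they
 * stay resident for the lifetime of the umem. If fewer pages than requested
 * could be pinned, unwind and fail.
 */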
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

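/* Charge the umem's pages against the owning user's RLIMIT_MEMLOCK, unless
 * the caller has CAP_IPC_LOCK. The cmpxchg loop updates locked_vm without
 * taking a lock.
 */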
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

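/* Validate the registration request and set up the umem: check chunk size,
 * alignment and headroom, account and pin the user pages, and build the
 * kernel-side page address array.
 */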
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. This is
		 * required for simplicity and might change in the future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->address = (unsigned long)addr;
	umem->props.chunk_mask = ~((u64)chunk_size - 1);
	umem->props.size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

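/* Allocate and register a new umem, typically reached via the XDP_UMEM_REG
 * setsockopt on an AF_XDP socket. Returns the umem or an ERR_PTR on failure.
 */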
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

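/* The umem is ready for use only once both a fill queue and a completion
 * queue have been created for it.
 */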
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}