// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "netlink.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "queueing.h"
#include "messages.h"

#include <uapi/linux/wireguard.h>

#include <linux/if.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <crypto/algapi.h>

static struct genl_family genl_family;

static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
	[WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
	[WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
	[WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
	[WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
	[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
	[WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
	[WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
};

static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
	[WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
	[WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
	[WGPEER_A_FLAGS] = { .type = NLA_U32 },
	[WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
	[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
	[WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
	[WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
	[WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
	[WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
	[WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 }
};

static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
	[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
	[WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
	[WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
};

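/* Resolve the target interface from WGDEVICE_A_IFINDEX or WGDEVICE_A_IFNAME
 * (exactly one of the two must be present), take a reference on the
 * net_device, and verify that it really is a WireGuard interface before
 * handing back its private data.
 */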
static struct wg_device *lookup_interface(struct nlattr **attrs,
					  struct sk_buff *skb)
{
	struct net_device *dev = NULL;

	if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME])
		return ERR_PTR(-EBADR);
	if (attrs[WGDEVICE_A_IFINDEX])
		dev = dev_get_by_index(sock_net(skb->sk),
				       nla_get_u32(attrs[WGDEVICE_A_IFINDEX]));
	else if (attrs[WGDEVICE_A_IFNAME])
		dev = dev_get_by_name(sock_net(skb->sk),
				      nla_data(attrs[WGDEVICE_A_IFNAME]));
	if (!dev)
		return ERR_PTR(-ENODEV);
	if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind ||
	    strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) {
		dev_put(dev);
		return ERR_PTR(-EOPNOTSUPP);
	}
	return netdev_priv(dev);
}

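/* Serialize one allowed IP as a nested attribute set of
 * WGALLOWEDIP_A_CIDR_MASK, WGALLOWEDIP_A_FAMILY, and WGALLOWEDIP_A_IPADDR.
 */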
static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr,
			  int family)
{
	struct nlattr *allowedip_nest;

	allowedip_nest = nla_nest_start(skb, 0);
	if (!allowedip_nest)
		return -EMSGSIZE;

	if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) ||
	    nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) ||
	    nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ?
		    sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) {
		nla_nest_cancel(skb, allowedip_nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, allowedip_nest);
	return 0;
}

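/* Cursor state for a resumable GET_DEVICE dump. DUMP_CTX() overlays this
 * struct onto netlink_callback->args, so it must fit within that array.
 */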
struct dump_ctx {
	struct wg_device *wg;
	struct wg_peer *next_peer;
	u64 allowedips_seq;
	struct allowedips_node *next_allowedip;
};

#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args)

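/* Emit one peer as a nested WGPEER_A_* attribute set. If the skb fills up
 * partway through the allowed IPs list, the position is recorded in ctx so
 * the next dump pass can resume from the same peer and allowed IP.
 */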
static int
get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
{
	struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0);
	struct allowedips_node *allowedips_node = ctx->next_allowedip;
	bool fail;

	if (!peer_nest)
		return -EMSGSIZE;

	down_read(&peer->handshake.lock);
	fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN,
		       peer->handshake.remote_static);
	up_read(&peer->handshake.lock);
	if (fail)
		goto err;

	if (!allowedips_node) {
		const struct __kernel_timespec last_handshake = {
			.tv_sec = peer->walltime_last_handshake.tv_sec,
			.tv_nsec = peer->walltime_last_handshake.tv_nsec
		};

		down_read(&peer->handshake.lock);
		fail = nla_put(skb, WGPEER_A_PRESHARED_KEY,
			       NOISE_SYMMETRIC_KEY_LEN,
			       peer->handshake.preshared_key);
		up_read(&peer->handshake.lock);
		if (fail)
			goto err;

		if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME,
			    sizeof(last_handshake), &last_handshake) ||
		    nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
				peer->persistent_keepalive_interval) ||
		    nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes,
				      WGPEER_A_UNSPEC) ||
		    nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes,
				      WGPEER_A_UNSPEC) ||
		    nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1))
			goto err;

		read_lock_bh(&peer->endpoint_lock);
		if (peer->endpoint.addr.sa_family == AF_INET)
			fail = nla_put(skb, WGPEER_A_ENDPOINT,
				       sizeof(peer->endpoint.addr4),
				       &peer->endpoint.addr4);
		else if (peer->endpoint.addr.sa_family == AF_INET6)
			fail = nla_put(skb, WGPEER_A_ENDPOINT,
				       sizeof(peer->endpoint.addr6),
				       &peer->endpoint.addr6);
		read_unlock_bh(&peer->endpoint_lock);
		if (fail)
			goto err;
		allowedips_node =
			list_first_entry_or_null(&peer->allowedips_list,
					struct allowedips_node, peer_list);
	}
	if (!allowedips_node)
		goto no_allowedips;
	if (!ctx->allowedips_seq)
		ctx->allowedips_seq = peer->device->peer_allowedips.seq;
	else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
		goto no_allowedips;

	allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
	if (!allowedips_nest)
		goto err;

	list_for_each_entry_from(allowedips_node, &peer->allowedips_list,
				 peer_list) {
		u8 cidr, ip[16] __aligned(__alignof(u64));
		int family;

		family = wg_allowedips_read_node(allowedips_node, ip, &cidr);
		if (get_allowedips(skb, ip, cidr, family)) {
			nla_nest_end(skb, allowedips_nest);
			nla_nest_end(skb, peer_nest);
			ctx->next_allowedip = allowedips_node;
			return -EMSGSIZE;
		}
	}
	nla_nest_end(skb, allowedips_nest);
no_allowedips:
	nla_nest_end(skb, peer_nest);
	ctx->next_allowedip = NULL;
	ctx->allowedips_seq = 0;
	return 0;
err:
	nla_nest_cancel(skb, peer_nest);
	return -EMSGSIZE;
}

static int wg_get_device_start(struct netlink_callback *cb)
{
	struct nlattr **attrs = genl_family_attrbuf(&genl_family);
	struct wg_device *wg;
	int ret;

	ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + genl_family.hdrsize, attrs,
			  genl_family.maxattr, device_policy, NULL);
	if (ret < 0)
		return ret;
	wg = lookup_interface(attrs, cb->skb);
	if (IS_ERR(wg))
		return PTR_ERR(wg);
	DUMP_CTX(cb)->wg = wg;
	return 0;
}

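/* One pass of the multi-part dump: the device attributes are emitted on the
 * first pass, then as many peers as fit in this skb. Runs under the RTNL and
 * device_update_lock, using device_update_gen as the consistency sequence
 * number so that userspace can detect an incoherent dump and retry.
 */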
static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct wg_peer *peer, *next_peer_cursor;
	struct dump_ctx *ctx = DUMP_CTX(cb);
	struct wg_device *wg = ctx->wg;
	struct nlattr *peers_nest;
	int ret = -EMSGSIZE;
	bool done = true;
	void *hdr;

	rtnl_lock();
	mutex_lock(&wg->device_update_lock);
	cb->seq = wg->device_update_gen;
	next_peer_cursor = ctx->next_peer;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE);
	if (!hdr)
		goto out;
	genl_dump_check_consistent(cb, hdr);

	if (!ctx->next_peer) {
		if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT,
				wg->incoming_port) ||
		    nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) ||
		    nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) ||
		    nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name))
			goto out;

		down_read(&wg->static_identity.lock);
		if (wg->static_identity.has_identity) {
			if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY,
				    NOISE_PUBLIC_KEY_LEN,
				    wg->static_identity.static_private) ||
			    nla_put(skb, WGDEVICE_A_PUBLIC_KEY,
				    NOISE_PUBLIC_KEY_LEN,
				    wg->static_identity.static_public)) {
				up_read(&wg->static_identity.lock);
				goto out;
			}
		}
		up_read(&wg->static_identity.lock);
	}

	peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS);
	if (!peers_nest)
		goto out;
	ret = 0;
	/* If the last cursor was removed via list_del_init in peer_remove, then
	 * we just treat this the same as there being no more peers left. The
	 * reason is that seq_nr should indicate to userspace that this isn't a
	 * coherent dump anyway, so they'll try again.
	 */
	if (list_empty(&wg->peer_list) ||
	    (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
		nla_nest_cancel(skb, peers_nest);
		goto out;
	}
	lockdep_assert_held(&wg->device_update_lock);
	peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
	list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
		if (get_peer(peer, skb, ctx)) {
			done = false;
			break;
		}
		next_peer_cursor = peer;
	}
	nla_nest_end(skb, peers_nest);

out:
	if (!ret && !done && next_peer_cursor)
		wg_peer_get(next_peer_cursor);
	wg_peer_put(ctx->next_peer);
	mutex_unlock(&wg->device_update_lock);
	rtnl_unlock();

	if (ret) {
		genlmsg_cancel(skb, hdr);
		return ret;
	}
	genlmsg_end(skb, hdr);
	if (done) {
		ctx->next_peer = NULL;
		return 0;
	}
	ctx->next_peer = next_peer_cursor;
	return skb->len;

	/* At this point, we can't really deal ourselves with safely zeroing out
	 * the private key material after usage. This will need an additional API
	 * in the kernel for marking skbs as zero_on_free.
	 */
}

static int wg_get_device_done(struct netlink_callback *cb)
{
	struct dump_ctx *ctx = DUMP_CTX(cb);

	if (ctx->wg)
		dev_put(ctx->wg->dev);
	wg_peer_put(ctx->next_peer);
	return 0;
}

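/* Change the listening UDP port. Each peer's cached endpoint source is
 * cleared so it will be re-resolved, and the socket is only re-initialized
 * when the interface is currently up; otherwise the new port simply takes
 * effect on the next bring-up.
 */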
static int set_port(struct wg_device *wg, u16 port)
{
	struct wg_peer *peer;

	if (wg->incoming_port == port)
		return 0;
	list_for_each_entry(peer, &wg->peer_list, peer_list)
		wg_socket_clear_peer_endpoint_src(peer);
	if (!netif_running(wg->dev)) {
		wg->incoming_port = port;
		return 0;
	}
	return wg_socket_init(wg, port);
}

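/* Validate a single WGALLOWEDIP_A_* nest and insert it into the device's
 * allowedips table for this peer, rejecting mismatched family/length or
 * out-of-range CIDR values with -EINVAL.
 */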
static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
{
	int ret = -EINVAL;
	u16 family;
	u8 cidr;

	if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] ||
	    !attrs[WGALLOWEDIP_A_CIDR_MASK])
		return ret;
	family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
	cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);

	if (family == AF_INET && cidr <= 32 &&
	    nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
		ret = wg_allowedips_insert_v4(
			&peer->device->peer_allowedips,
			nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
			&peer->device->device_update_lock);
	else if (family == AF_INET6 && cidr <= 128 &&
		 nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
		ret = wg_allowedips_insert_v6(
			&peer->device->peer_allowedips,
			nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
			&peer->device->device_update_lock);

	return ret;
}

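/* Create or update a single peer from a WGPEER_A_* nest: public key, optional
 * preshared key, flags, endpoint, allowed IPs, and persistent keepalive. The
 * preshared key attribute, if present, is zeroed before returning.
 */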
static int set_peer(struct wg_device *wg, struct nlattr **attrs)
{
	u8 *public_key = NULL, *preshared_key = NULL;
	struct wg_peer *peer = NULL;
	u32 flags = 0;
	int ret;

	ret = -EINVAL;
	if (attrs[WGPEER_A_PUBLIC_KEY] &&
	    nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN)
		public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]);
	else
		goto out;
	if (attrs[WGPEER_A_PRESHARED_KEY] &&
	    nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN)
		preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]);

	if (attrs[WGPEER_A_FLAGS])
		flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
	ret = -EOPNOTSUPP;
	if (flags & ~__WGPEER_F_ALL)
		goto out;

	ret = -EPFNOSUPPORT;
	if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
		if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1)
			goto out;
	}

	peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
					  nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
	ret = 0;
	if (!peer) { /* Peer doesn't exist yet. Add a new one. */
		if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY))
			goto out;

		/* The peer is new, so there aren't allowed IPs to remove. */
		flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS;

		down_read(&wg->static_identity.lock);
		if (wg->static_identity.has_identity &&
		    !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]),
			    wg->static_identity.static_public,
			    NOISE_PUBLIC_KEY_LEN)) {
			/* We silently ignore peers that have the same public
			 * key as the device. The reason we do it silently is
			 * that we'd like for people to be able to reuse the
			 * same set of API calls across peers.
			 */
			up_read(&wg->static_identity.lock);
			ret = 0;
			goto out;
		}
		up_read(&wg->static_identity.lock);

		peer = wg_peer_create(wg, public_key, preshared_key);
		if (IS_ERR(peer)) {
			ret = PTR_ERR(peer);
			peer = NULL;
			goto out;
		}
		/* Take additional reference, as though we've just been
		 * looked up.
		 */
		wg_peer_get(peer);
	}

	if (flags & WGPEER_F_REMOVE_ME) {
		wg_peer_remove(peer);
		goto out;
	}

	if (preshared_key) {
		down_write(&peer->handshake.lock);
		memcpy(&peer->handshake.preshared_key, preshared_key,
		       NOISE_SYMMETRIC_KEY_LEN);
		up_write(&peer->handshake.lock);
	}

	if (attrs[WGPEER_A_ENDPOINT]) {
		struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
		size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);

		if ((len == sizeof(struct sockaddr_in) &&
		     addr->sa_family == AF_INET) ||
		    (len == sizeof(struct sockaddr_in6) &&
		     addr->sa_family == AF_INET6)) {
			struct endpoint endpoint = { { { 0 } } };

			memcpy(&endpoint.addr, addr, len);
			wg_socket_set_peer_endpoint(peer, &endpoint);
		}
	}

	if (flags & WGPEER_F_REPLACE_ALLOWEDIPS)
		wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer,
					     &wg->device_update_lock);

	if (attrs[WGPEER_A_ALLOWEDIPS]) {
		struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
		int rem;

		nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
			ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
					       attr, allowedip_policy, NULL);
			if (ret < 0)
				goto out;
			ret = set_allowedip(peer, allowedip);
			if (ret < 0)
				goto out;
		}
	}

	if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) {
		const u16 persistent_keepalive_interval = nla_get_u16(
				attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]);
		const bool send_keepalive =
			!peer->persistent_keepalive_interval &&
			persistent_keepalive_interval &&
			netif_running(wg->dev);

		peer->persistent_keepalive_interval = persistent_keepalive_interval;
		if (send_keepalive)
			wg_packet_send_keepalive(peer);
	}

	if (netif_running(wg->dev))
		wg_packet_send_staged_packets(peer);

out:
	wg_peer_put(peer);
	if (attrs[WGPEER_A_PRESHARED_KEY])
		memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]),
				 nla_len(attrs[WGPEER_A_PRESHARED_KEY]));
	return ret;
}

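/* WG_CMD_SET_DEVICE handler: applies fwmark, listen port, private key, and
 * peer changes under the RTNL and device_update_lock. The private key
 * attribute, if present, is zeroed before returning.
 */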
static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
{
	struct wg_device *wg = lookup_interface(info->attrs, skb);
	u32 flags = 0;
	int ret;

	if (IS_ERR(wg)) {
		ret = PTR_ERR(wg);
		goto out_nodev;
	}

	rtnl_lock();
	mutex_lock(&wg->device_update_lock);

	if (info->attrs[WGDEVICE_A_FLAGS])
		flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
	ret = -EOPNOTSUPP;
	if (flags & ~__WGDEVICE_F_ALL)
		goto out;

	if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
		struct net *net;
		rcu_read_lock();
		net = rcu_dereference(wg->creating_net);
		ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
		rcu_read_unlock();
		if (ret)
			goto out;
	}

	++wg->device_update_gen;

	if (info->attrs[WGDEVICE_A_FWMARK]) {
		struct wg_peer *peer;

		wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
		list_for_each_entry(peer, &wg->peer_list, peer_list)
			wg_socket_clear_peer_endpoint_src(peer);
	}

	if (info->attrs[WGDEVICE_A_LISTEN_PORT]) {
		ret = set_port(wg,
			nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT]));
		if (ret)
			goto out;
	}

	if (flags & WGDEVICE_F_REPLACE_PEERS)
		wg_peer_remove_all(wg);

	if (info->attrs[WGDEVICE_A_PRIVATE_KEY] &&
	    nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) ==
		    NOISE_PUBLIC_KEY_LEN) {
		u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
		u8 public_key[NOISE_PUBLIC_KEY_LEN];
		struct wg_peer *peer, *temp;

		if (!crypto_memneq(wg->static_identity.static_private,
				   private_key, NOISE_PUBLIC_KEY_LEN))
			goto skip_set_private_key;

		/* We remove before setting, to prevent race, which means doing
		 * two 25519-genpub ops.
		 */
		if (curve25519_generate_public(public_key, private_key)) {
			peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
							  public_key);
			if (peer) {
				wg_peer_put(peer);
				wg_peer_remove(peer);
			}
		}

		down_write(&wg->static_identity.lock);
		wg_noise_set_static_identity_private_key(&wg->static_identity,
							 private_key);
		list_for_each_entry_safe(peer, temp, &wg->peer_list,
					 peer_list) {
			wg_noise_precompute_static_static(peer);
			wg_noise_expire_current_peer_keypairs(peer);
		}
		wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
		up_write(&wg->static_identity.lock);
	}
skip_set_private_key:

	if (info->attrs[WGDEVICE_A_PEERS]) {
		struct nlattr *attr, *peer[WGPEER_A_MAX + 1];
		int rem;

		nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
			ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
					       peer_policy, NULL);
			if (ret < 0)
				goto out;
			ret = set_peer(wg, peer);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;

out:
	mutex_unlock(&wg->device_update_lock);
	rtnl_unlock();
	dev_put(wg->dev);
out_nodev:
	if (info->attrs[WGDEVICE_A_PRIVATE_KEY])
		memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]),
				 nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]));
	return ret;
}

static const struct genl_ops genl_ops[] = {
	{
		.cmd = WG_CMD_GET_DEVICE,
		.start = wg_get_device_start,
		.dumpit = wg_get_device_dump,
		.done = wg_get_device_done,
		.flags = GENL_UNS_ADMIN_PERM
	}, {
		.cmd = WG_CMD_SET_DEVICE,
		.doit = wg_set_device,
		.flags = GENL_UNS_ADMIN_PERM
	}
};

static struct genl_family genl_family __ro_after_init = {
	.ops = genl_ops,
	.n_ops = ARRAY_SIZE(genl_ops),
	.name = WG_GENL_NAME,
	.version = WG_GENL_VERSION,
	.maxattr = WGDEVICE_A_MAX,
	.module = THIS_MODULE,
	.policy = device_policy,
	.netnsok = true
};

int __init wg_genetlink_init(void)
{
	return genl_register_family(&genl_family);
}

void __exit wg_genetlink_uninit(void)
{
	genl_unregister_family(&genl_family);
}