From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Fri, 14 Feb 2020 23:57:22 +0100
Subject: [PATCH] wireguard: send: account for mtu=0 devices

commit 175f1ca9a9ed8689d2028da1a7c624bb4fb4ff7e upstream.

It turns out there's an easy way to get packets queued up while still
having an MTU of zero, and that's via persistent keepalive. This commit
makes sure that, whatever the conditions, we don't wind up dividing by
zero. Note that an MTU of zero for a wireguard interface is something
quasi-valid, so I don't think the correct fix is to limit it via
min_mtu. This can be reproduced easily with:

  ip link add wg0 type wireguard
  ip link add wg1 type wireguard
  ip link set wg0 up mtu 0
  ip link set wg1 up
  wg set wg0 private-key <(wg genkey)
  wg set wg1 listen-port 1 private-key <(wg genkey) peer $(wg show wg0 public-key)
  wg set wg0 peer $(wg show wg1 public-key) persistent-keepalive 1 endpoint 127.0.0.1:1

However, while min_mtu=0 seems fine, it makes sense to restrict the
max_mtu. This commit also restricts the maximum MTU to the greatest
number for which rounding up to the padding multiple won't overflow a
signed integer. Packets this large were always eventually rejected
anyway, due to checks deeper in the stack, but it seems more sound not
to even let the administrator configure something that won't work
anyway.
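
As a sanity check on that bound, here is a minimal standalone sketch
(not part of the patch), assuming WireGuard's MESSAGE_PADDING_MULTIPLE
of 16 and the 80-byte worst-case header overhead computed in device.c
below:

  #include <limits.h>
  #include <stdio.h>

  /* Assumed constants, mirrored from the driver: the padding multiple
   * is 16, and overhead = MESSAGE_MINIMUM_LENGTH (32) + UDP header (8)
   * + IPv6 header (40) = 80 bytes. */
  #define MESSAGE_PADDING_MULTIPLE 16
  #define OVERHEAD 80

  /* Kernel-style round_down() for a power-of-two multiple. */
  #define round_down(x, y) ((x) & ~((y) - 1))

  int main(void)
  {
          int max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - OVERHEAD;

          /* max_mtu + OVERHEAD == 2147483632, already a multiple of 16,
           * so rounding it up to the padding multiple cannot exceed
           * INT_MAX (2147483647). */
          printf("max_mtu = %d\n", max_mtu); /* prints 2147483552 */
          return 0;
  }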

We use this opportunity to clean up this function a bit so that it's
clear which paths we're expecting.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/net/wireguard/device.c |  7 ++++---
 drivers/net/wireguard/send.c   | 16 +++++++++++-----
 2 files changed, 15 insertions(+), 8 deletions(-)

--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -258,6 +258,8 @@ static void wg_setup(struct net_device *
 	enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
 				    NETIF_F_SG | NETIF_F_GSO |
 				    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
+	const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
+			     max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
 
 	dev->netdev_ops = &netdev_ops;
 	dev->hard_header_len = 0;
@@ -271,9 +273,8 @@ static void wg_setup(struct net_device *
 	dev->features |= WG_NETDEV_FEATURES;
 	dev->hw_features |= WG_NETDEV_FEATURES;
 	dev->hw_enc_features |= WG_NETDEV_FEATURES;
-	dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
-		   sizeof(struct udphdr) -
-		   max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
+	dev->mtu = ETH_DATA_LEN - overhead;
+	dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
 
 	SET_NETDEV_DEVTYPE(dev, &device_type);
 
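(For context: with the sizes these symbols usually have, namely a
32-byte MESSAGE_MINIMUM_LENGTH, an 8-byte UDP header, and a 40-byte
IPv6 header as the larger of the two IP headers, the overhead comes to
80 bytes, so dev->mtu defaults to 1500 - 80 = 1420, WireGuard's
familiar default MTU.)
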
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_pee
 
 static unsigned int calculate_skb_padding(struct sk_buff *skb)
 {
+	unsigned int padded_size, last_unit = skb->len;
+
+	if (unlikely(!PACKET_CB(skb)->mtu))
+		return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;
+
 	/* We do this modulo business with the MTU, just in case the networking
 	 * layer gives us a packet that's bigger than the MTU. In that case, we
 	 * wouldn't want the final subtraction to overflow in the case of the
-	 * padded_size being clamped.
+	 * padded_size being clamped. Fortunately, that's very rarely the case,
+	 * so we optimize for that not happening.
 	 */
-	unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
-	unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+	if (unlikely(last_unit > PACKET_CB(skb)->mtu))
+		last_unit %= PACKET_CB(skb)->mtu;
 
-	if (padded_size > PACKET_CB(skb)->mtu)
-		padded_size = PACKET_CB(skb)->mtu;
+	padded_size = min(PACKET_CB(skb)->mtu,
+			  ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
 	return padded_size - last_unit;
 }
 
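To illustrate the new behavior, here is a userspace re-implementation
of the patched calculate_skb_padding() (a minimal sketch, not the
driver code itself: len stands in for skb->len, mtu for
PACKET_CB(skb)->mtu, and ALIGN/MIN follow their kernel definitions for
power-of-two multiples):

  #include <stdio.h>

  #define MESSAGE_PADDING_MULTIPLE 16
  #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
  #define MIN(a, b) ((a) < (b) ? (a) : (b))

  static unsigned int calculate_padding(unsigned int len, unsigned int mtu)
  {
          unsigned int padded_size, last_unit = len;

          /* mtu == 0 (e.g. a keepalive queued on an mtu=0 device):
           * pad to the multiple without ever dividing by mtu. */
          if (!mtu)
                  return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;

          /* Oversized packet: the rare slow path the comment mentions. */
          if (last_unit > mtu)
                  last_unit %= mtu;

          /* Pad to the multiple, but never beyond the MTU itself. */
          padded_size = MIN(mtu, ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
          return padded_size - last_unit;
  }

  int main(void)
  {
          printf("%u\n", calculate_padding(0, 0));       /* 0: no division on mtu=0 */
          printf("%u\n", calculate_padding(100, 1420));  /* 12: 100 padded to 112 */
          printf("%u\n", calculate_padding(1419, 1420)); /* 1: clamped to the MTU */
          return 0;
  }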