/*
2 * xfrm_output.c - Common IPsec encapsulation code.
3 *
4 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/errno.h>
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/netfilter.h>
16#include <linux/skbuff.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <net/dst.h>
20#include <net/xfrm.h>
21
22#include <net/ra_nat.h>
23static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
24
25static int xfrm_skb_check_space(struct sk_buff *skb)
26{
27 struct dst_entry *dst = skb_dst(skb);
28 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
29 - skb_headroom(skb);
30 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
31
32 if (nhead <= 0) {
33 if (ntail <= 0)
34 return 0;
35 nhead = 0;
36 } else if (ntail < 0)
37 ntail = 0;
38
39 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
40}
41
42/* Children define the path of the packet through the
43 * Linux networking. Thus, destinations are stackable.
44 */
45
/* Detach the skb's current dst and return a held reference to its
 * child, stepping the packet one level down the stacked dst chain.
 */
static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *parent = skb_dst(skb);
	struct dst_entry *next = dst_clone(xfrm_dst_child(parent));

	skb_dst_drop(skb);
	return next;
}
53
/*
 * Apply the output transform of one (or a chain of non-tunnel) xfrm
 * state(s) to the skb.
 *
 * @skb: packet being transmitted; consumed (freed) on any error path.
 * @err: > 0 to start processing from the top; <= 0 to resume after an
 *       asynchronous transform completed (entered via the resume label).
 *
 * Returns 0 once the whole non-tunnel chain is applied, -EINPROGRESS
 * when the crypto layer will complete asynchronously, or a negative
 * errno on failure (skb already freed in that case).
 */
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	/* Re-entry after async completion: skip straight past output. */
	if (err <= 0)
		goto resume;

	do {
		/* Ensure head/tail room for this state's encapsulation. */
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		/* Build the outer (tunnel/transport mode) header. */
		err = x->outer_mode->output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		/* Validity check, replay sequence advance and lifetime
		 * accounting must happen atomically under the state lock.
		 */
		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		/* Advance the outbound sequence number; fails on overflow. */
		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		/* Vendor hook: clears the HW-NAT acceleration tag so the
		 * now-encapsulated packet is not taken by the hardware
		 * fast path.  NOTE(review): semantics inferred from the
		 * name -- confirm against ra_nat.h.
		 */
		hwnat_magic_tag_set_zero(skb);
		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

		if (xfrm_offload(skb)) {
			/* HW offload: only build the encap header, the
			 * device performs the crypto.
			 */
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			/* Software transform (e.g. ESP).  -EINPROGRESS
			 * means async crypto owns the skb; processing
			 * continues later via xfrm_output_resume().
			 */
			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		/* Move to the next dst/state in the bundle, if any. */
		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}
147
/*
 * Start (err == 1) or resume (err from async crypto) output transform
 * processing.  After each successfully applied state the packet is
 * re-injected through the family's local_out and the POST_ROUTING
 * netfilter hook, until no xfrm state remains on the dst.
 *
 * Returns the dst_output() result on full success, 0 when async crypto
 * is still pending (-EINPROGRESS is mapped to 0), or a negative errno.
 */
int xfrm_output_resume(struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		/* Conntrack state refers to the pre-encapsulation packet. */
		nf_reset(skb);

		/* err == 1 means "continue"; anything else is final. */
		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
		if (unlikely(err != 1))
			goto out;

		/* No transform left on the dst: normal transmit path. */
		if (!skb_dst(skb)->xfrm)
			return dst_output(net, skb->sk, skb);

		/* Re-run POST_ROUTING for the new outer packet; if the
		 * hook queues it, xfrm_output2() re-enters this loop.
		 */
		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, skb->sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);
176
/* okfn for the POST_ROUTING hook: (re-)enter transform processing from
 * the top for this packet.
 */
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(skb, 1);
}
181
/*
 * Software-segment a GSO skb and push each resulting segment through
 * xfrm_output2() individually.  The original skb is always consumed.
 * Returns 0 on success or the first failing segment's error (the
 * remaining, not-yet-sent segments are freed).
 */
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs;

	/* The IP control blocks must survive inside the GSO CB area. */
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);		/* original consumed regardless of outcome */
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	/* Detach one segment per iteration; ownership of each segment
	 * passes to xfrm_output2().
	 */
	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			kfree_skb_list(nskb);
			return err;
		}

		segs = nskb;
	} while (segs);

	return 0;
}
212
/*
 * IPsec output entry point for a packet whose route carries an xfrm
 * bundle.  Chooses between hardware crypto offload, software GSO
 * segmentation and plain software transformation, then hands the skb
 * to xfrm_output2().  Consumes the skb on error.
 *
 * Returns 0 on success or a negative errno (skb freed on error).
 */
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		/* Record the state in a secpath so the offloading driver
		 * can find it (skb->sp was just reset above).
		 */
		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[skb->sp->len++] = x;
		xfrm_state_hold(x);	/* secpath entry holds a reference */

		if (skb_is_gso(skb)) {
			/* Device will segment; tag the packet as ESP GSO. */
			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;

			return xfrm_output2(net, sk, skb);
		}

		/* Device also computes the checksum: skip the software
		 * checksum resolution below.
		 */
		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	}

	/* No (full) HW offload: segment in software if needed. */
	if (skb_is_gso(skb))
		return xfrm_output_gso(net, sk, skb);

	/* Software crypto needs a fully resolved checksum. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
265
266int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
267{
268 struct xfrm_mode *inner_mode;
269 if (x->sel.family == AF_UNSPEC)
270 inner_mode = xfrm_ip2inner_mode(x,
271 xfrm_af2proto(skb_dst(skb)->ops->family));
272 else
273 inner_mode = x->inner_mode;
274
275 if (inner_mode == NULL)
276 return -EAFNOSUPPORT;
277 return inner_mode->afinfo->extract_output(x, skb);
278}
279EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
280
/*
 * Report a local transform error (e.g. packet larger than the path
 * MTU) back to the sender via the address family's local_error
 * handler.
 *
 * @skb: offending packet; its protocol selects the address family.
 * @mtu: MTU value to report.
 */
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = AF_INET6;
	else
		return;	/* non-IP traffic: nothing to report */

	/* NOTE(review): this assumes xfrm_state_get_afinfo() takes
	 * rcu_read_lock() and leaves it held only when it returns
	 * non-NULL, which is why rcu_read_unlock() appears solely in
	 * the success branch -- confirm against xfrm_state.c.
	 */
	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo) {
		afinfo->local_error(skb, mtu);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xfrm_local_error);