// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
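/* Action execution may nest (sample, recirc, clone, check_pkt_len). The
 * first OVS_DEFERRED_ACTION_THRESHOLD levels run immediately on cloned
 * keys from the per-CPU 'flow_keys' pool below; deeper levels fall back
 * to the per-CPU deferred-action FIFO, and anything beyond
 * OVS_RECURSION_LIMIT is dropped in ovs_execute_actions().
 */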
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

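/* The FIFO is a simple linear array rather than a ring: head and tail
 * only advance, and process_deferred_actions() resets both to zero once
 * the queue is drained. Because it is per-CPU and drained before the
 * next packet is processed, no locking is needed.
 */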
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the deferred action entry on success, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}
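
/* Actions that rewrite the packet beyond what they mirror into 'key'
 * (e.g. MPLS push/pop, push_eth) mark the key invalid; a later recirc
 * or conntrack action then calls ovs_flow_key_update() to re-extract it
 * from the packet. The flag lives in a spare bit of 'mac_proto'.
 */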
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	int err;

	err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
			    skb->mac_len,
			    ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

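/* Masked set semantics used throughout this file: OVS_MASKED(old, new,
 * mask) yields (new & mask) | (old & ~mask), i.e. only the bits
 * selected by the mask are taken from the new value (see the OVS flow
 * headers for the exact definition). The helper below applies this to
 * a 6-byte Ethernet address as three 16-bit chunks.
 */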
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

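/* When an address that feeds the L4 pseudo-header changes, the TCP/UDP
 * checksum must be patched as well. A UDP checksum of zero means "no
 * checksum", so if the incremental update here (or in the IPv6 variant
 * further down) folds to zero, CSUM_MANGLED_0 (0xffff) is written
 * instead; the two are equivalent in ones'-complement arithmetic.
 */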
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

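/* Rewriting the IPv6 destination is special: when a routing extension
 * header is present, the receiver computes the L4 checksum against the
 * final destination taken from that header, not the one in the fixed
 * header, so the incremental checksum update must be skipped in that
 * case (see the NEXTHDR_ROUTING probe below).
 */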
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

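/* UDP ports go through the incremental-update path only when a checksum
 * is actually in use: a zero uh->check means "no checksum", and
 * CHECKSUM_PARTIAL means the checksum will be finished later over the
 * final header contents, so both of those cases below simply store the
 * new ports.
 */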
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

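/* SCTP uses a full CRC32c rather than an incremental checksum. The XOR
 * below (old_csum ^ old_correct_csum ^ new_csum) rewrites the checksum
 * for the new ports while preserving any pre-existing checksum error:
 * a packet that arrived with a bad checksum stays exactly as bad.
 */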
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

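/* The IP fragmentation paths expect a dst whose device supplies the
 * MTU. Since the datapath does not route, ovs_fragment() attaches a
 * minimal on-stack dst backed by these ops so that ip_do_fragment()
 * and the IPv6 fragment hook size fragments from the egress vport's
 * device.
 */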
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse,
 * ovs_vport_output(), is called once per resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

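/* 'cutlen' bytes (set by a preceding truncate action) are trimmed from
 * the tail before transmit, never cutting into the L2 header. If the
 * packet was reassembled by conntrack, OVS_CB(skb)->mru records the
 * largest received fragment, and oversized packets are re-fragmented
 * on the way out.
 */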
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		/* Need to set the pkt_type to involve the routing layer. The
		 * packet movement through the OVS datapath doesn't generally
		 * use routing, but this is needed for tunnel cases.
		 */
		skb->pkt_type = PACKET_OUTGOING;

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

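/* A masked set attribute carries the key struct immediately followed by
 * an equally-sized mask, so casting nla_data() to the key's pointer
 * type and adding 1 steps over the key and lands on the mask; e.g.
 * get_mask(a, struct ovs_key_ipv4 *) for an OVS_KEY_ATTR_IPV4 payload.
 */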
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution neither
 * affects the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

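/* Recursion accounting: exec_actions_level is bumped on every entry,
 * including re-entry through recirculation, and the deferred-action
 * FIFO is drained only when the outermost invocation (level 1)
 * finishes, so each packet's deferred work runs exactly once.
 */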
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}