net/openvswitch/datapath.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_cls.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
#include "openvswitch_trace.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

unsigned int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP,
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
                            unsigned int group)
{
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
               genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
                       struct sk_buff *skb, struct genl_info *info)
{
        genl_notify(family, skb, info, 0, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath or port,
 * set operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization, but they nest under all of the above and don't
 * interact with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
        mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
        mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
        if (debug_locks)
                return lockdep_is_held(&ovs_mutex);
        else
                return 1;
}
#endif
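
/* Illustrative sketch only (not part of the upstream file): in practice the
 * locking DOC above means a write-side caller brackets its update with
 * ovs_lock()/ovs_unlock(), while readers rely on RCU, e.g.:
 *
 *      ovs_lock();
 *      vport = new_vport(&parms);      // mutate datapath state under ovs_mutex
 *      ovs_unlock();
 *
 *      rcu_read_lock();
 *      vport = ovs_vport_rcu(dp, port_no);     // lockless read side
 *      rcu_read_unlock();
 */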

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
                             const struct sw_flow_key *,
                             const struct dp_upcall_info *,
                             uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
                                  const struct sw_flow_key *,
                                  const struct dp_upcall_info *,
                                  uint32_t cutlen);

static void ovs_dp_masks_rebalance(struct work_struct *work);

static int ovs_dp_set_upcall_portids(struct datapath *, const struct nlattr *);

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);

        return ovs_vport_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->dev->ifindex;
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy(&dp->table);
        free_percpu(dp->stats_percpu);
        kfree(dp->ports);
        ovs_meters_exit(dp);
        kfree(rcu_dereference_raw(dp->upcall_portids));
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
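
/* Worked example (illustrative only, assuming DP_VPORT_HASH_BUCKETS is a
 * power of two, which the mask above requires): with 1024 buckets,
 * port_no 1025 maps to 1025 & 1023 == bucket 1, so the AND is a cheap
 * modulo.
 */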

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node,
                                 lockdep_ovsl_is_held()) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_OVSL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
        const struct vport *p = OVS_CB(skb)->input_vport;
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct sw_flow_actions *sf_acts;
        struct dp_stats_percpu *stats;
        u64 *stats_counter;
        u32 n_mask_hit;
        u32 n_cache_hit;
        int error;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
                                         &n_mask_hit, &n_cache_hit);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                memset(&upcall, 0, sizeof(upcall));
                upcall.cmd = OVS_PACKET_CMD_MISS;

                if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
                        upcall.portid =
                            ovs_dp_get_upcall_portid(dp, smp_processor_id());
                else
                        upcall.portid = ovs_vport_find_upcall_portid(p, skb);

                upcall.mru = OVS_CB(skb)->mru;
                error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
                switch (error) {
                case 0:
                case -EAGAIN:
                case -ERESTARTSYS:
                case -EINTR:
                        consume_skb(skb);
                        break;
                default:
                        kfree_skb(skb);
                        break;
                }
                stats_counter = &stats->n_missed;
                goto out;
        }

        ovs_flow_stats_update(flow, key->tp.flags, skb);
        sf_acts = rcu_dereference(flow->sf_acts);
        error = ovs_execute_actions(dp, skb, sf_acts, key);
        if (unlikely(error))
                net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
                                    ovs_dp_name(dp), error);

        stats_counter = &stats->n_hit;

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->syncp);
        (*stats_counter)++;
        stats->n_mask_hit += n_mask_hit;
        stats->n_cache_hit += n_cache_hit;
        u64_stats_update_end(&stats->syncp);
}

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct sw_flow_key *key,
                  const struct dp_upcall_info *upcall_info,
                  uint32_t cutlen)
{
        struct dp_stats_percpu *stats;
        int err;

        if (trace_ovs_dp_upcall_enabled())
                trace_ovs_dp_upcall(dp, skb, key, upcall_info);

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
        else
                err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->syncp);
        stats->n_lost++;
        u64_stats_update_end(&stats->syncp);

        return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct sw_flow_key *key,
                             const struct dp_upcall_info *upcall_info,
                             uint32_t cutlen)
{
        unsigned int gso_type = skb_shinfo(skb)->gso_type;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_GSO_CB_OFFSET);
        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (segs == NULL)
                return -EINVAL;

        if (gso_type & SKB_GSO_UDP) {
                /* The initial flow key extracted by ovs_flow_key_extract()
                 * in this case is for the first fragment, so we need to
                 * properly mark the later fragments.
                 */
                later_key = *key;
                later_key.ip.frag = OVS_FRAG_TYPE_LATER;
        }

        /* Queue all of the segments. */
        skb_list_walk_safe(segs, skb, nskb) {
                if (gso_type & SKB_GSO_UDP && skb != segs)
                        key = &later_key;

                err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
                if (err)
                        break;
        }

        /* Free all of the segments. */
        skb_list_walk_safe(segs, skb, nskb) {
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        }
        return err;
}

static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
                              unsigned int hdrlen, int actions_attrlen)
{
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
                + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
                + nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
                + nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */

        /* OVS_PACKET_ATTR_USERDATA */
        if (upcall_info->userdata)
                size += NLA_ALIGN(upcall_info->userdata->nla_len);

        /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
        if (upcall_info->egress_tun_info)
                size += nla_total_size(ovs_tun_key_attr_size());

        /* OVS_PACKET_ATTR_ACTIONS */
        if (upcall_info->actions_len)
                size += nla_total_size(actions_attrlen);

        /* OVS_PACKET_ATTR_MRU */
        if (upcall_info->mru)
                size += nla_total_size(sizeof(upcall_info->mru));

        return size;
}
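
/* Worked example (illustrative only): nla_total_size(payload) expands to
 * NLA_ALIGN(NLA_HDRLEN + payload), so the OVS_PACKET_ATTR_HASH term above
 * contributes NLA_ALIGN(4 + 8) == 12 bytes, and the OVS_PACKET_ATTR_MRU
 * term (a u16) contributes NLA_ALIGN(4 + 2) == 8 bytes of the estimate.
 */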

static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
        if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
                size_t plen = NLA_ALIGN(skb->len) - skb->len;

                if (plen > 0)
                        skb_put_zero(skb, plen);
        }
}
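
/* Example of the padding arithmetic (illustrative only): for a message of
 * skb->len == 54, NLA_ALIGN(54) == 56, so pad_packet() appends two zero
 * bytes; for an already-aligned length, plen == 0 and nothing is added.
 */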

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                                  const struct sw_flow_key *key,
                                  const struct dp_upcall_info *upcall_info,
                                  uint32_t cutlen)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb = NULL; /* to be queued to userspace */
        struct nlattr *nla;
        size_t len;
        unsigned int hlen;
        int err, dp_ifindex;
        u64 hash;

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex)
                return -ENODEV;

        if (skb_vlan_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_hwaccel_push_inside(nskb);
                if (!nskb)
                        return -ENOMEM;

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        /* Complete checksum if needed */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                err = skb_csum_hwoffload_help(skb, 0);
                if (err)
                        goto out;
        }

        /* Older versions of OVS user space enforce alignment of the last
         * Netlink attribute to NLA_ALIGNTO which would require extensive
         * padding logic. Only perform zerocopy if padding is not required.
         */
        if (dp->user_features & OVS_DP_F_UNALIGNED)
                hlen = skb_zerocopy_headlen(skb);
        else
                hlen = skb->len;

        len = upcall_msg_size(upcall_info, hlen - cutlen,
                              OVS_CB(skb)->acts_origlen);
        user_skb = genlmsg_new(len, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        if (!upcall) {
                err = -EINVAL;
                goto out;
        }
        upcall->dp_ifindex = dp_ifindex;

        err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
        if (err)
                goto out;

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start_noflag(user_skb,
                                            OVS_PACKET_ATTR_EGRESS_TUN_KEY);
                if (!nla) {
                        err = -EMSGSIZE;
                        goto out;
                }
                err = ovs_nla_put_tunnel_info(user_skb,
                                              upcall_info->egress_tun_info);
                if (err)
                        goto out;

                nla_nest_end(user_skb, nla);
        }

        if (upcall_info->actions_len) {
                nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
                if (!nla) {
                        err = -EMSGSIZE;
                        goto out;
                }
                err = ovs_nla_put_actions(upcall_info->actions,
                                          upcall_info->actions_len,
                                          user_skb);
                if (!err)
                        nla_nest_end(user_skb, nla);
                else
                        nla_nest_cancel(user_skb, nla);
        }

        /* Add OVS_PACKET_ATTR_MRU */
        if (upcall_info->mru &&
            nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) {
                err = -ENOBUFS;
                goto out;
        }

        /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
        if (cutlen > 0 &&
            nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) {
                err = -ENOBUFS;
                goto out;
        }

        /* Add OVS_PACKET_ATTR_HASH */
        hash = skb_get_hash_raw(skb);
        if (skb->sw_hash)
                hash |= OVS_PACKET_HASH_SW_BIT;

        if (skb->l4_hash)
                hash |= OVS_PACKET_HASH_L4_BIT;

        if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof(u64), &hash)) {
                err = -ENOBUFS;
                goto out;
        }

        /* Only reserve room for the attribute header; the packet data is
         * added in skb_zerocopy().
         */
        nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0);
        if (!nla) {
                err = -ENOBUFS;
                goto out;
        }
        nla->nla_len = nla_attr_size(skb->len - cutlen);

        err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
        if (err)
                goto out;

        /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
        pad_packet(dp, user_skb);

        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
        user_skb = NULL;
out:
        if (err)
                skb_tx_error(skb);
        consume_skb(user_skb);
        consume_skb(nskb);

        return err;
}
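
/* Sketch of the OVS_PACKET_ATTR_HASH encoding built above (illustrative
 * only): the low 32 bits carry skb_get_hash_raw(), while
 * OVS_PACKET_HASH_SW_BIT and OVS_PACKET_HASH_L4_BIT flag a software-computed
 * and an L4 hash respectively.  The receive side in ovs_packet_cmd_execute()
 * below undoes it the same way:
 *
 *      __skb_set_hash(packet, hash & 0xFFFFFFFFULL,
 *                     !!(hash & OVS_PACKET_HASH_SW_BIT),
 *                     !!(hash & OVS_PACKET_HASH_L4_BIT));
 */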

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct net *net = sock_net(skb->sk);
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct sw_flow_actions *sf_acts;
        struct datapath *dp;
        struct vport *input_vport;
        u16 mru = 0;
        u64 hash;
        int len;
        int err;
        bool log = !a[OVS_PACKET_ATTR_PROBE];

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

        /* Set packet's mru */
        if (a[OVS_PACKET_ATTR_MRU]) {
                mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
                packet->ignore_df = 1;
        }
        OVS_CB(packet)->mru = mru;

        if (a[OVS_PACKET_ATTR_HASH]) {
                hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);

                __skb_set_hash(packet, hash & 0xFFFFFFFFULL,
                               !!(hash & OVS_PACKET_HASH_SW_BIT),
                               !!(hash & OVS_PACKET_HASH_L4_BIT));
        }

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
                                             packet, &flow->key, log);
        if (err)
                goto err_flow_free;

        err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
                                   &flow->key, &acts, log);
        if (err)
                goto err_flow_free;

        rcu_assign_pointer(flow->sf_acts, acts);
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;

        rcu_read_lock();
        dp = get_dp_rcu(net, ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
        if (!input_vport)
                input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

        if (!input_vport)
                goto err_unlock;

        packet->dev = input_vport->dev;
        OVS_CB(packet)->input_vport = input_vport;
        sf_acts = rcu_dereference(flow->sf_acts);

        local_bh_disable();
        err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow, false);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow, false);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
        [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
        [OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};

static const struct genl_small_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .doit = ovs_packet_cmd_execute
        }
};

static struct genl_family dp_packet_genl_family __ro_after_init = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
        .policy = packet_policy,
        .netnsok = true,
        .parallel_ops = true,
        .small_ops = dp_packet_genl_ops,
        .n_small_ops = ARRAY_SIZE(dp_packet_genl_ops),
        .resv_start_op = OVS_PACKET_CMD_EXECUTE + 1,
        .module = THIS_MODULE,
};

static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
                         struct ovs_dp_megaflow_stats *mega_stats)
{
        int i;

        memset(mega_stats, 0, sizeof(*mega_stats));

        stats->n_flows = ovs_flow_tbl_count(&dp->table);
        mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;

        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
                mega_stats->n_mask_hit += local_stats.n_mask_hit;
                mega_stats->n_cache_hit += local_stats.n_cache_hit;
        }
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
        return ovs_identifier_is_ufid(sfid) &&
               !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
        return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
        return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}
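
/* Usage sketch (illustrative only): a userspace request can set
 * OVS_FLOW_ATTR_UFID_FLAGS to, e.g.,
 * OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK to trim replies down to the
 * UFID plus stats and actions; the helpers above gate each optional
 * attribute on those flags.
 */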

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
                                    const struct sw_flow_id *sfid,
                                    uint32_t ufid_flags)
{
        size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

        /* OVS_FLOW_ATTR_UFID, or the unmasked flow key as a fallback;
         * see ovs_nla_put_identifier().
         */
        if (sfid && ovs_identifier_is_ufid(sfid))
                len += nla_total_size(sfid->ufid_len);
        else
                len += nla_total_size(ovs_key_attr_size());

        /* OVS_FLOW_ATTR_KEY */
        if (!sfid || should_fill_key(sfid, ufid_flags))
                len += nla_total_size(ovs_key_attr_size());

        /* OVS_FLOW_ATTR_MASK */
        if (should_fill_mask(ufid_flags))
                len += nla_total_size(ovs_key_attr_size());

        /* OVS_FLOW_ATTR_ACTIONS */
        if (should_fill_actions(ufid_flags))
                len += nla_total_size(acts->orig_len);

        return len
                + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
                                   struct sk_buff *skb)
{
        struct ovs_flow_stats stats;
        __be16 tcp_flags;
        unsigned long used;

        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

        if (used &&
            nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
                              OVS_FLOW_ATTR_PAD))
                return -EMSGSIZE;

        if (stats.n_packets &&
            nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
                          sizeof(struct ovs_flow_stats), &stats,
                          OVS_FLOW_ATTR_PAD))
                return -EMSGSIZE;

        if ((u8)ntohs(tcp_flags) &&
             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
                return -EMSGSIZE;

        return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
                                     struct sk_buff *skb, int skb_orig_len)
{
        struct nlattr *start;
        int err;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                const struct sw_flow_actions *sf_acts;

                sf_acts = rcu_dereference_ovsl(flow->sf_acts);
                err = ovs_nla_put_actions(sf_acts->actions,
                                          sf_acts->actions_len, skb);

                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                return err;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len) {
                return -EMSGSIZE;
        }

        return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
        const int skb_orig_len = skb->len;
        struct ovs_header *ovs_header;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = dp_ifindex;

        err = ovs_nla_put_identifier(flow, skb);
        if (err)
                goto error;

        if (should_fill_key(&flow->id, ufid_flags)) {
                err = ovs_nla_put_masked_key(flow, skb);
                if (err)
                        goto error;
        }

        if (should_fill_mask(ufid_flags)) {
                err = ovs_nla_put_mask(flow, skb);
                if (err)
                        goto error;
        }

        err = ovs_flow_cmd_fill_stats(flow, skb);
        if (err)
                goto error;

        if (should_fill_actions(ufid_flags)) {
                err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
                if (err)
                        goto error;
        }

        genlmsg_end(skb, ovs_header);
        return 0;

error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
                                               const struct sw_flow_id *sfid,
                                               struct genl_info *info,
                                               bool always,
                                               uint32_t ufid_flags)
{
        struct sk_buff *skb;
        size_t len;

        if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
                return NULL;

        len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
        skb = genlmsg_new(len, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
                                               int dp_ifindex,
                                               struct genl_info *info, u8 cmd,
                                               bool always, u32 ufid_flags)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
                                      &flow->id, info, always, ufid_flags);
        if (IS_ERR_OR_NULL(skb))
                return skb;

        retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
                                        info->snd_portid, info->snd_seq, 0,
                                        cmd, ufid_flags);
        if (WARN_ON_ONCE(retval < 0)) {
                kfree_skb(skb);
                skb = ERR_PTR(retval);
        }
        return skb;
}

static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow *flow = NULL, *new_flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
        struct sw_flow_key *key;
        struct sw_flow_actions *acts;
        struct sw_flow_match match;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int error;
        bool log = !a[OVS_FLOW_ATTR_PROBE];

        /* Must have key and actions. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY]) {
                OVS_NLERR(log, "Flow key attr not present in new flow.");
                goto error;
        }
        if (!a[OVS_FLOW_ATTR_ACTIONS]) {
                OVS_NLERR(log, "Flow actions attr not present in new flow.");
                goto error;
        }

        /* Most of the time we need to allocate a new flow, so do it before
         * taking the lock.
         */
        new_flow = ovs_flow_alloc();
        if (IS_ERR(new_flow)) {
                error = PTR_ERR(new_flow);
                goto error;
        }

        /* Extract key. */
        key = kzalloc(sizeof(*key), GFP_KERNEL);
        if (!key) {
                error = -ENOMEM;
                goto err_kfree_flow;
        }

        ovs_match_init(&match, key, false, &mask);
        error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
                                  a[OVS_FLOW_ATTR_MASK], log);
        if (error)
                goto err_kfree_key;

        ovs_flow_mask_key(&new_flow->key, key, true, &mask);

        /* Extract flow identifier. */
        error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
                                       key, log);
        if (error)
                goto err_kfree_key;

        /* Validate actions. */
        error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
                                     &new_flow->key, &acts, log);
        if (error) {
                OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
                goto err_kfree_key;
        }

        reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
                                        ufid_flags);
        if (IS_ERR(reply)) {
                error = PTR_ERR(reply);
                goto err_kfree_acts;
        }

        ovs_lock();
        dp = get_dp(net, ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }

        /* Check if this is a duplicate flow */
        if (ovs_identifier_is_ufid(&new_flow->id))
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
        if (!flow)
                flow = ovs_flow_tbl_lookup(&dp->table, key);
        if (likely(!flow)) {
                rcu_assign_pointer(new_flow->sf_acts, acts);

                /* Put flow in bucket. */
                error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
                if (unlikely(error)) {
                        acts = NULL;
                        goto err_unlock_ovs;
                }

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(new_flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW,
                                                       ufid_flags);
                        BUG_ON(error < 0);
                }
                ovs_unlock();
        } else {
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
                                                         | NLM_F_EXCL))) {
                        error = -EEXIST;
                        goto err_unlock_ovs;
                }
                /* The flow identifier has to be the same for flow updates.
                 * Look for any overlapping flow.
                 */
                if (unlikely(!ovs_flow_cmp(flow, &match))) {
                        if (ovs_identifier_is_key(&flow->id))
                                flow = ovs_flow_tbl_lookup_exact(&dp->table,
                                                                 &match);
                        else /* UFID matches but key is different */
                                flow = NULL;
                        if (!flow) {
                                error = -ENOENT;
                                goto err_unlock_ovs;
                        }
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW,
                                                       ufid_flags);
                        BUG_ON(error < 0);
                }
                ovs_unlock();

                ovs_nla_free_flow_actions_rcu(old_acts);
                ovs_flow_free(new_flow, false);
        }

        if (reply)
                ovs_notify(&dp_flow_genl_family, reply, info);

        kfree(key);
        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        ovs_nla_free_flow_actions(acts);
err_kfree_key:
        kfree(key);
err_kfree_flow:
        ovs_flow_free(new_flow, false);
error:
        return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static noinline_for_stack
struct sw_flow_actions *get_flow_actions(struct net *net,
                                         const struct nlattr *a,
                                         const struct sw_flow_key *key,
                                         const struct sw_flow_mask *mask,
                                         bool log)
{
        struct sw_flow_actions *acts;
        struct sw_flow_key masked_key;
        int error;

        ovs_flow_mask_key(&masked_key, key, true, mask);
        error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
        if (error) {
                OVS_NLERR(log,
                          "Actions may not be safe on all matching packets");
                return ERR_PTR(error);
        }

        return acts;
}

/* Factor out match-init and action-copy to avoid
 * "Wframe-larger-than=1024" warning.  Because the mask is only used to
 * derive the actions, a separate function saves some stack space.
 *
 * If neither the key nor the actions attribute is present, we return 0
 * directly; in that case the caller will not use the match either.  If
 * the actions attribute is present, we try to copy the actions and save
 * them to *acts.  Before returning, we reset the match->mask pointer,
 * because we must not return a match object with a dangling reference
 * to the on-stack mask.
 */
static noinline_for_stack int
ovs_nla_init_match_and_action(struct net *net,
                              struct sw_flow_match *match,
                              struct sw_flow_key *key,
                              struct nlattr **a,
                              struct sw_flow_actions **acts,
                              bool log)
{
        struct sw_flow_mask mask;
        int error = 0;

        if (a[OVS_FLOW_ATTR_KEY]) {
                ovs_match_init(match, key, true, &mask);
                error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
                                          a[OVS_FLOW_ATTR_MASK], log);
                if (error)
                        goto error;
        }

        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                if (!a[OVS_FLOW_ATTR_KEY]) {
                        OVS_NLERR(log,
                                  "Flow key attribute not present in set flow.");
                        error = -EINVAL;
                        goto error;
                }

                *acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
                                         &mask, log);
                if (IS_ERR(*acts)) {
                        error = PTR_ERR(*acts);
                        goto error;
                }
        }

        /* On success, error is 0. */
error:
        match->mask = NULL;
        return error;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply = NULL;
        struct datapath *dp;
        struct sw_flow_actions *old_acts = NULL, *acts = NULL;
        struct sw_flow_match match;
        struct sw_flow_id sfid;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int error = 0;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
        bool ufid_present;

        ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
        if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
                OVS_NLERR(log,
                          "Flow set message rejected, Key attribute missing.");
                return -EINVAL;
        }

        error = ovs_nla_init_match_and_action(net, &match, &key, a,
                                              &acts, log);
        if (error)
                goto error;

        if (acts) {
                /* Can allocate before locking if we have the acts. */
                reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
                                                ufid_flags);
                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
                        goto err_kfree_acts;
                }
        }

        ovs_lock();
        dp = get_dp(net, ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
        if (ufid_present)
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
        else
                flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }

        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_SET,
                                                       ufid_flags);
                        BUG_ON(error < 0);
                }
        } else {
                /* Without the acts, we could not allocate the reply before
                 * locking.
                 */
                reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
                                                info, OVS_FLOW_CMD_SET, false,
                                                ufid_flags);

                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
                        goto err_unlock_ovs;
                }
        }

        /* Clear stats. */
        if (a[OVS_FLOW_ATTR_CLEAR])
                ovs_flow_stats_clear(flow);
        ovs_unlock();

        if (reply)
                ovs_notify(&dp_flow_genl_family, reply, info);
        if (old_acts)
                ovs_nla_free_flow_actions_rcu(old_acts);

        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        ovs_nla_free_flow_actions(acts);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct net *net = sock_net(skb->sk);
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        struct sw_flow_id ufid;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int err = 0;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
        bool ufid_present;

        ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
        if (a[OVS_FLOW_ATTR_KEY]) {
                ovs_match_init(&match, &key, true, NULL);
                err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
                                        log);
        } else if (!ufid_present) {
                OVS_NLERR(log,
                          "Flow get message rejected, Key attribute missing.");
                err = -EINVAL;
        }
        if (err)
                return err;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        if (ufid_present)
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
        else
                flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (!flow) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
                                        OVS_FLOW_CMD_GET, true, ufid_flags);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
        }

        ovs_unlock();
        return genlmsg_reply(reply, info);
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct net *net = sock_net(skb->sk);
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow = NULL;
        struct datapath *dp;
        struct sw_flow_match match;
        struct sw_flow_id ufid;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int err;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
        bool ufid_present;

        ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
        if (a[OVS_FLOW_ATTR_KEY]) {
                ovs_match_init(&match, &key, true, NULL);
                err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
                                        NULL, log);
                if (unlikely(err))
                        return err;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                err = -ENODEV;
                goto unlock;
        }

        if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }

        if (ufid_present)
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
        else
                flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
        }

        ovs_flow_tbl_remove(&dp->table, flow);
        ovs_unlock();

        reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *)flow->sf_acts,
                                        &flow->id, info, false, ufid_flags);
        if (likely(reply)) {
                if (!IS_ERR(reply)) {
                        rcu_read_lock();        /* To keep RCU checker happy. */
                        err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
                                                     reply, info->snd_portid,
                                                     info->snd_seq, 0,
                                                     OVS_FLOW_CMD_DEL,
                                                     ufid_flags);
                        rcu_read_unlock();
                        if (WARN_ON_ONCE(err < 0)) {
                                kfree_skb(reply);
                                goto out_free;
                        }

                        ovs_notify(&dp_flow_genl_family, reply, info);
                } else {
                        netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0,
                                        PTR_ERR(reply));
                }
        }

out_free:
        ovs_flow_free(flow, true);
        return 0;
unlock:
        ovs_unlock();
        return err;
}
1429
1430 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1431 {
1432         struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1433         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1434         struct table_instance *ti;
1435         struct datapath *dp;
1436         u32 ufid_flags;
1437         int err;
1438
1439         err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
1440                                        OVS_FLOW_ATTR_MAX, flow_policy, NULL);
1441         if (err)
1442                 return err;
1443         ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1444
1445         rcu_read_lock();
1446         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1447         if (!dp) {
1448                 rcu_read_unlock();
1449                 return -ENODEV;
1450         }
1451
1452         ti = rcu_dereference(dp->table.ti);
1453         for (;;) {
1454                 struct sw_flow *flow;
1455                 u32 bucket, obj;
1456
1457                 bucket = cb->args[0];
1458                 obj = cb->args[1];
1459                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1460                 if (!flow)
1461                         break;
1462
1463                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1464                                            NETLINK_CB(cb->skb).portid,
1465                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1466                                            OVS_FLOW_CMD_GET, ufid_flags) < 0)
1467                         break;
1468
1469                 cb->args[0] = bucket;
1470                 cb->args[1] = obj;
1471         }
1472         rcu_read_unlock();
1473         return skb->len;
1474 }
1475
1476 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1477         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1478         [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1479         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1480         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1481         [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1482         [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1483         [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1484 };
1485
1486 static const struct genl_small_ops dp_flow_genl_ops[] = {
1487         { .cmd = OVS_FLOW_CMD_NEW,
1488           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1489           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1490           .doit = ovs_flow_cmd_new
1491         },
1492         { .cmd = OVS_FLOW_CMD_DEL,
1493           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1494           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1495           .doit = ovs_flow_cmd_del
1496         },
1497         { .cmd = OVS_FLOW_CMD_GET,
1498           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1499           .flags = 0,               /* OK for unprivileged users. */
1500           .doit = ovs_flow_cmd_get,
1501           .dumpit = ovs_flow_cmd_dump
1502         },
1503         { .cmd = OVS_FLOW_CMD_SET,
1504           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1505           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1506           .doit = ovs_flow_cmd_set,
1507         },
1508 };
1509
1510 static struct genl_family dp_flow_genl_family __ro_after_init = {
1511         .hdrsize = sizeof(struct ovs_header),
1512         .name = OVS_FLOW_FAMILY,
1513         .version = OVS_FLOW_VERSION,
1514         .maxattr = OVS_FLOW_ATTR_MAX,
1515         .policy = flow_policy,
1516         .netnsok = true,
1517         .parallel_ops = true,
1518         .small_ops = dp_flow_genl_ops,
1519         .n_small_ops = ARRAY_SIZE(dp_flow_genl_ops),
1520         .resv_start_op = OVS_FLOW_CMD_SET + 1,
1521         .mcgrps = &ovs_dp_flow_multicast_group,
1522         .n_mcgrps = 1,
1523         .module = THIS_MODULE,
1524 };
1525
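/* Worst-case size of an OVS_DP_CMD_* reply: one slot for each attribute that
 * ovs_dp_cmd_fill_info() may emit. Pre-sizing the reply skb lets callers
 * treat a failure to fit (see the BUG_ON()s below) as a kernel bug rather
 * than a runtime condition.
 */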
1526 static size_t ovs_dp_cmd_msg_size(void)
1527 {
1528         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1529
1530         msgsize += nla_total_size(IFNAMSIZ);
1531         msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1532         msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1533         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1534         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */
1535         msgsize += nla_total_size(sizeof(u32) * nr_cpu_ids); /* OVS_DP_ATTR_PER_CPU_PIDS */
1536
1537         return msgsize;
1538 }
1539
1540 /* Called with ovs_mutex. */
1541 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1542                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1543 {
1544         struct ovs_header *ovs_header;
1545         struct ovs_dp_stats dp_stats;
1546         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1547         struct dp_nlsk_pids *pids = ovsl_dereference(dp->upcall_portids);
1548         int err, pids_len;
1549
1550         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1551                                  flags, cmd);
1552         if (!ovs_header)
1553                 goto error;
1554
1555         ovs_header->dp_ifindex = get_dpifindex(dp);
1556
1557         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1558         if (err)
1559                 goto nla_put_failure;
1560
1561         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1562         if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1563                           &dp_stats, OVS_DP_ATTR_PAD))
1564                 goto nla_put_failure;
1565
1566         if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1567                           sizeof(struct ovs_dp_megaflow_stats),
1568                           &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1569                 goto nla_put_failure;
1570
1571         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1572                 goto nla_put_failure;
1573
1574         if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
1575                         ovs_flow_tbl_masks_cache_size(&dp->table)))
1576                 goto nla_put_failure;
1577
1578         if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU && pids) {
1579                 pids_len = min(pids->n_pids, nr_cpu_ids) * sizeof(u32);
1580                 if (nla_put(skb, OVS_DP_ATTR_PER_CPU_PIDS, pids_len, &pids->pids))
1581                         goto nla_put_failure;
1582         }
1583
1584         genlmsg_end(skb, ovs_header);
1585         return 0;
1586
1587 nla_put_failure:
1588         genlmsg_cancel(skb, ovs_header);
1589 error:
1590         return -EMSGSIZE;
1591 }
1592
1593 static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1594 {
1595         return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1596 }
1597
1598 /* Called with rcu_read_lock or ovs_mutex. */
1599 static struct datapath *lookup_datapath(struct net *net,
1600                                         const struct ovs_header *ovs_header,
1601                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1602 {
1603         struct datapath *dp;
1604
1605         if (!a[OVS_DP_ATTR_NAME])
1606                 dp = get_dp(net, ovs_header->dp_ifindex);
1607         else {
1608                 struct vport *vport;
1609
1610                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1611                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1612         }
1613         return dp ? dp : ERR_PTR(-ENODEV);
1614 }
1615
1616 static void ovs_dp_reset_user_features(struct sk_buff *skb,
1617                                        struct genl_info *info)
1618 {
1619         struct datapath *dp;
1620
1621         dp = lookup_datapath(sock_net(skb->sk), info->userhdr,
1622                              info->attrs);
1623         if (IS_ERR(dp))
1624                 return;
1625
1626         pr_warn("%s: Dropping previously announced user features\n",
1627                 ovs_dp_name(dp));
1628         dp->user_features = 0;
1629 }
1630
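/* Replace the array of per-CPU upcall netlink PIDs with the one carried in
 * @ids, which must be a non-empty array of u32 PIDs (for example, a 4-CPU
 * system would carry a contiguous array of four u32s, one PID per CPU).
 * The old array is swapped out with rcu_assign_pointer() and freed via
 * kfree_rcu(), so concurrent readers in ovs_dp_get_upcall_portid() never see
 * it disappear under them. Called with ovs_mutex held.
 */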
1631 static int ovs_dp_set_upcall_portids(struct datapath *dp,
1632                                      const struct nlattr *ids)
1633 {
1634         struct dp_nlsk_pids *old, *dp_nlsk_pids;
1635
1636         if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
1637                 return -EINVAL;
1638
1639         old = ovsl_dereference(dp->upcall_portids);
1640
1641         dp_nlsk_pids = kmalloc(sizeof(*dp_nlsk_pids) + nla_len(ids),
1642                                GFP_KERNEL);
1643         if (!dp_nlsk_pids)
1644                 return -ENOMEM;
1645
1646         dp_nlsk_pids->n_pids = nla_len(ids) / sizeof(u32);
1647         nla_memcpy(dp_nlsk_pids->pids, ids, nla_len(ids));
1648
1649         rcu_assign_pointer(dp->upcall_portids, dp_nlsk_pids);
1650
1651         kfree_rcu(old, rcu);
1652
1653         return 0;
1654 }
1655
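/* Look up the netlink PID that should receive upcalls triggered on @cpu_id
 * when per-CPU upcall dispatch is enabled. Returns 0 when no PID array is
 * configured, which the upcall path treats as having no listener. With,
 * say, a two-entry PID array, an upcall raised on cpu_id 5 maps to
 * pids[5 % 2] == pids[1] rather than being dropped.
 */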
1656 u32 ovs_dp_get_upcall_portid(const struct datapath *dp, uint32_t cpu_id)
1657 {
1658         struct dp_nlsk_pids *dp_nlsk_pids;
1659
1660         dp_nlsk_pids = rcu_dereference(dp->upcall_portids);
1661
1662         if (dp_nlsk_pids) {
1663                 if (cpu_id < dp_nlsk_pids->n_pids) {
1664                         return dp_nlsk_pids->pids[cpu_id];
1665                 } else if (dp_nlsk_pids->n_pids > 0) {
1666                         /* The number of netlink PIDs is mismatched with
1667                          * the number of CPUs as seen by the kernel; log
1668                          * this and fall back to the PID at
1669                          * cpu_id % n_pids so that upcalls are not
1670                          * dropped.
1671                          */
1672                         pr_info_ratelimited("cpu_id mismatch with handler threads\n");
1673                         return dp_nlsk_pids->pids[cpu_id %
1674                                                   dp_nlsk_pids->n_pids];
1675                 } else {
1676                         return 0;
1677                 }
1678         } else {
1679                 return 0;
1680         }
1681 }
1682
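/* Apply the OVS_DP_ATTR_* changes in @a: validate and update the
 * user-features bitmap, resize the flow-mask cache, and install new per-CPU
 * upcall PIDs when per-CPU dispatch is enabled. Also enables or disables the
 * tc skb extension as OVS_DP_F_TC_RECIRC_SHARING is switched on or off.
 * Called with ovs_mutex held, from both OVS_DP_CMD_NEW and OVS_DP_CMD_SET.
 */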
1683 static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1684 {
1685         u32 user_features = 0, old_features = dp->user_features;
1686         int err;
1687
1688         if (a[OVS_DP_ATTR_USER_FEATURES]) {
1689                 user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1690
1691                 if (user_features & ~(OVS_DP_F_VPORT_PIDS |
1692                                       OVS_DP_F_UNALIGNED |
1693                                       OVS_DP_F_TC_RECIRC_SHARING |
1694                                       OVS_DP_F_DISPATCH_UPCALL_PER_CPU))
1695                         return -EOPNOTSUPP;
1696
1697 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1698                 if (user_features & OVS_DP_F_TC_RECIRC_SHARING)
1699                         return -EOPNOTSUPP;
1700 #endif
1701         }
1702
1703         if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
1704                 int err;
1705                 u32 cache_size;
1706
1707                 cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
1708                 err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
1709                 if (err)
1710                         return err;
1711         }
1712
1713         dp->user_features = user_features;
1714
1715         if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU &&
1716             a[OVS_DP_ATTR_PER_CPU_PIDS]) {
1717                 /* The upcall netlink port IDs have been updated. */
1718                 err = ovs_dp_set_upcall_portids(dp,
1719                                                 a[OVS_DP_ATTR_PER_CPU_PIDS]);
1720                 if (err)
1721                         return err;
1722         }
1723
1724         if ((dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
1725             !(old_features & OVS_DP_F_TC_RECIRC_SHARING))
1726                 tc_skb_ext_tc_enable();
1727         else if (!(dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
1728                  (old_features & OVS_DP_F_TC_RECIRC_SHARING))
1729                 tc_skb_ext_tc_disable();
1730
1731         return 0;
1732 }
1733
1734 static int ovs_dp_stats_init(struct datapath *dp)
1735 {
1736         dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1737         if (!dp->stats_percpu)
1738                 return -ENOMEM;
1739
1740         return 0;
1741 }
1742
1743 static int ovs_dp_vport_init(struct datapath *dp)
1744 {
1745         int i;
1746
1747         dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1748                                   sizeof(struct hlist_head),
1749                                   GFP_KERNEL);
1750         if (!dp->ports)
1751                 return -ENOMEM;
1752
1753         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1754                 INIT_HLIST_HEAD(&dp->ports[i]);
1755
1756         return 0;
1757 }
1758
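/* OVS_DP_CMD_NEW: create a datapath along with its OVSP_LOCAL internal
 * vport. Everything up to new_vport() only touches the not-yet-published dp,
 * so ovs_mutex is taken as late as possible; on failure the error labels
 * unwind the allocations in the reverse order they were made.
 */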
1759 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1760 {
1761         struct nlattr **a = info->attrs;
1762         struct vport_parms parms;
1763         struct sk_buff *reply;
1764         struct datapath *dp;
1765         struct vport *vport;
1766         struct ovs_net *ovs_net;
1767         int err;
1768
1769         err = -EINVAL;
1770         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1771                 goto err;
1772
1773         reply = ovs_dp_cmd_alloc_info();
1774         if (!reply)
1775                 return -ENOMEM;
1776
1777         err = -ENOMEM;
1778         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1779         if (!dp)
1780                 goto err_destroy_reply;
1781
1782         ovs_dp_set_net(dp, sock_net(skb->sk));
1783
1784         /* Allocate table. */
1785         err = ovs_flow_tbl_init(&dp->table);
1786         if (err)
1787                 goto err_destroy_dp;
1788
1789         err = ovs_dp_stats_init(dp);
1790         if (err)
1791                 goto err_destroy_table;
1792
1793         err = ovs_dp_vport_init(dp);
1794         if (err)
1795                 goto err_destroy_stats;
1796
1797         err = ovs_meters_init(dp);
1798         if (err)
1799                 goto err_destroy_ports;
1800
1801         /* Set up our datapath device. */
1802         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1803         parms.type = OVS_VPORT_TYPE_INTERNAL;
1804         parms.options = NULL;
1805         parms.dp = dp;
1806         parms.port_no = OVSP_LOCAL;
1807         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1808         parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
1809                 ? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0;
1810
1811         /* So far only local changes have been made, now need the lock. */
1812         ovs_lock();
1813
1814         err = ovs_dp_change(dp, a);
1815         if (err)
1816                 goto err_unlock_and_destroy_meters;
1817
1818         vport = new_vport(&parms);
1819         if (IS_ERR(vport)) {
1820                 err = PTR_ERR(vport);
1821                 if (err == -EBUSY)
1822                         err = -EEXIST;
1823
1824                 if (err == -EEXIST) {
1825                         /* An outdated user space instance that does not understand
1826                          * the concept of user_features has attempted to create a new
1827                          * datapath and is likely to reuse it. Drop all user features.
1828                          */
1829                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1830                                 ovs_dp_reset_user_features(skb, info);
1831                 }
1832
1833                 goto err_destroy_portids;
1834         }
1835
1836         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1837                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1838         BUG_ON(err < 0);
1839
1840         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1841         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1842
1843         ovs_unlock();
1844
1845         ovs_notify(&dp_datapath_genl_family, reply, info);
1846         return 0;
1847
1848 err_destroy_portids:
1849         kfree(rcu_dereference_raw(dp->upcall_portids));
1850 err_unlock_and_destroy_meters:
1851         ovs_unlock();
1852         ovs_meters_exit(dp);
1853 err_destroy_ports:
1854         kfree(dp->ports);
1855 err_destroy_stats:
1856         free_percpu(dp->stats_percpu);
1857 err_destroy_table:
1858         ovs_flow_tbl_destroy(&dp->table);
1859 err_destroy_dp:
1860         kfree(dp);
1861 err_destroy_reply:
1862         kfree_skb(reply);
1863 err:
1864         return err;
1865 }
1866
1867 /* Called with ovs_mutex. */
1868 static void __dp_destroy(struct datapath *dp)
1869 {
1870         struct flow_table *table = &dp->table;
1871         int i;
1872
1873         if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
1874                 tc_skb_ext_tc_disable();
1875
1876         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1877                 struct vport *vport;
1878                 struct hlist_node *n;
1879
1880                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1881                         if (vport->port_no != OVSP_LOCAL)
1882                                 ovs_dp_detach_port(vport);
1883         }
1884
1885         list_del_rcu(&dp->list_node);
1886
1887         /* OVSP_LOCAL is the datapath's internal port. Make sure that all
1888          * other ports in the datapath are destroyed before freeing it.
1889          */
1890         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1891
1892         /* Flush the sw_flows from the tables here; the RCU callback only
1893          * releases resources such as the dp, ports and tables. Flushing
1894          * now avoids issues such as RCU usage warnings.
1895          */
1896         table_instance_flow_flush(table, ovsl_dereference(table->ti),
1897                                   ovsl_dereference(table->ufid_ti));
1898
1899         /* RCU destroy the ports, meters and flow tables. */
1900         call_rcu(&dp->rcu, destroy_dp_rcu);
1901 }
1902
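/* OVS_DP_CMD_DEL: the reply is allocated and filled in before __dp_destroy()
 * runs, so once the datapath is gone the notification can no longer fail
 * (hence the BUG_ON() on the fill result).
 */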
1903 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1904 {
1905         struct sk_buff *reply;
1906         struct datapath *dp;
1907         int err;
1908
1909         reply = ovs_dp_cmd_alloc_info();
1910         if (!reply)
1911                 return -ENOMEM;
1912
1913         ovs_lock();
1914         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1915         err = PTR_ERR(dp);
1916         if (IS_ERR(dp))
1917                 goto err_unlock_free;
1918
1919         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1920                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1921         BUG_ON(err < 0);
1922
1923         __dp_destroy(dp);
1924         ovs_unlock();
1925
1926         ovs_notify(&dp_datapath_genl_family, reply, info);
1927
1928         return 0;
1929
1930 err_unlock_free:
1931         ovs_unlock();
1932         kfree_skb(reply);
1933         return err;
1934 }
1935
1936 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1937 {
1938         struct sk_buff *reply;
1939         struct datapath *dp;
1940         int err;
1941
1942         reply = ovs_dp_cmd_alloc_info();
1943         if (!reply)
1944                 return -ENOMEM;
1945
1946         ovs_lock();
1947         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1948         err = PTR_ERR(dp);
1949         if (IS_ERR(dp))
1950                 goto err_unlock_free;
1951
1952         err = ovs_dp_change(dp, info->attrs);
1953         if (err)
1954                 goto err_unlock_free;
1955
1956         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1957                                    info->snd_seq, 0, OVS_DP_CMD_SET);
1958         BUG_ON(err < 0);
1959
1960         ovs_unlock();
1961         ovs_notify(&dp_datapath_genl_family, reply, info);
1962
1963         return 0;
1964
1965 err_unlock_free:
1966         ovs_unlock();
1967         kfree_skb(reply);
1968         return err;
1969 }
1970
1971 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1972 {
1973         struct sk_buff *reply;
1974         struct datapath *dp;
1975         int err;
1976
1977         reply = ovs_dp_cmd_alloc_info();
1978         if (!reply)
1979                 return -ENOMEM;
1980
1981         ovs_lock();
1982         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1983         if (IS_ERR(dp)) {
1984                 err = PTR_ERR(dp);
1985                 goto err_unlock_free;
1986         }
1987         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1988                                    info->snd_seq, 0, OVS_DP_CMD_GET);
1989         BUG_ON(err < 0);
1990         ovs_unlock();
1991
1992         return genlmsg_reply(reply, info);
1993
1994 err_unlock_free:
1995         ovs_unlock();
1996         kfree_skb(reply);
1997         return err;
1998 }
1999
2000 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2001 {
2002         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
2003         struct datapath *dp;
2004         int skip = cb->args[0];
2005         int i = 0;
2006
2007         ovs_lock();
2008         list_for_each_entry(dp, &ovs_net->dps, list_node) {
2009                 if (i >= skip &&
2010                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
2011                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
2012                                          OVS_DP_CMD_GET) < 0)
2013                         break;
2014                 i++;
2015         }
2016         ovs_unlock();
2017
2018         cb->args[0] = i;
2019
2020         return skb->len;
2021 }
2022
2023 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
2024         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2025         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2026         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
2027         [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
2028                 PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
2029         [OVS_DP_ATTR_IFINDEX] = { .type = NLA_U32 },
2030 };
2031
2032 static const struct genl_small_ops dp_datapath_genl_ops[] = {
2033         { .cmd = OVS_DP_CMD_NEW,
2034           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2035           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2036           .doit = ovs_dp_cmd_new
2037         },
2038         { .cmd = OVS_DP_CMD_DEL,
2039           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2040           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2041           .doit = ovs_dp_cmd_del
2042         },
2043         { .cmd = OVS_DP_CMD_GET,
2044           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2045           .flags = 0,               /* OK for unprivileged users. */
2046           .doit = ovs_dp_cmd_get,
2047           .dumpit = ovs_dp_cmd_dump
2048         },
2049         { .cmd = OVS_DP_CMD_SET,
2050           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2051           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2052           .doit = ovs_dp_cmd_set,
2053         },
2054 };
2055
2056 static struct genl_family dp_datapath_genl_family __ro_after_init = {
2057         .hdrsize = sizeof(struct ovs_header),
2058         .name = OVS_DATAPATH_FAMILY,
2059         .version = OVS_DATAPATH_VERSION,
2060         .maxattr = OVS_DP_ATTR_MAX,
2061         .policy = datapath_policy,
2062         .netnsok = true,
2063         .parallel_ops = true,
2064         .small_ops = dp_datapath_genl_ops,
2065         .n_small_ops = ARRAY_SIZE(dp_datapath_genl_ops),
2066         .resv_start_op = OVS_DP_CMD_SET + 1,
2067         .mcgrps = &ovs_dp_datapath_multicast_group,
2068         .n_mcgrps = 1,
2069         .module = THIS_MODULE,
2070 };
2071
2072 /* Called with ovs_mutex or RCU read lock. */
2073 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
2074                                    struct net *net, u32 portid, u32 seq,
2075                                    u32 flags, u8 cmd, gfp_t gfp)
2076 {
2077         struct ovs_header *ovs_header;
2078         struct ovs_vport_stats vport_stats;
2079         int err;
2080
2081         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
2082                                  flags, cmd);
2083         if (!ovs_header)
2084                 return -EMSGSIZE;
2085
2086         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
2087
2088         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
2089             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
2090             nla_put_string(skb, OVS_VPORT_ATTR_NAME,
2091                            ovs_vport_name(vport)) ||
2092             nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
2093                 goto nla_put_failure;
2094
2095         if (!net_eq(net, dev_net(vport->dev))) {
2096                 int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
2097
2098                 if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
2099                         goto nla_put_failure;
2100         }
2101
2102         ovs_vport_get_stats(vport, &vport_stats);
2103         if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
2104                           sizeof(struct ovs_vport_stats), &vport_stats,
2105                           OVS_VPORT_ATTR_PAD))
2106                 goto nla_put_failure;
2107
2108         if (ovs_vport_get_upcall_portids(vport, skb))
2109                 goto nla_put_failure;
2110
2111         err = ovs_vport_get_options(vport, skb);
2112         if (err == -EMSGSIZE)
2113                 goto error;
2114
2115         genlmsg_end(skb, ovs_header);
2116         return 0;
2117
2118 nla_put_failure:
2119         err = -EMSGSIZE;
2120 error:
2121         genlmsg_cancel(skb, ovs_header);
2122         return err;
2123 }
2124
2125 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
2126 {
2127         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2128 }
2129
2130 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
2131 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
2132                                          u32 portid, u32 seq, u8 cmd)
2133 {
2134         struct sk_buff *skb;
2135         int retval;
2136
2137         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2138         if (!skb)
2139                 return ERR_PTR(-ENOMEM);
2140
2141         retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
2142                                          GFP_KERNEL);
2143         BUG_ON(retval < 0);
2144
2145         return skb;
2146 }
2147
2148 /* Called with ovs_mutex or RCU read lock. */
2149 static struct vport *lookup_vport(struct net *net,
2150                                   const struct ovs_header *ovs_header,
2151                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
2152 {
2153         struct datapath *dp;
2154         struct vport *vport;
2155
2156         if (a[OVS_VPORT_ATTR_IFINDEX])
2157                 return ERR_PTR(-EOPNOTSUPP);
2158         if (a[OVS_VPORT_ATTR_NAME]) {
2159                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
2160                 if (!vport)
2161                         return ERR_PTR(-ENODEV);
2162                 if (ovs_header->dp_ifindex &&
2163                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
2164                         return ERR_PTR(-ENODEV);
2165                 return vport;
2166         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
2167                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
2168
2169                 if (port_no >= DP_MAX_PORTS)
2170                         return ERR_PTR(-EFBIG);
2171
2172                 dp = get_dp(net, ovs_header->dp_ifindex);
2173                 if (!dp)
2174                         return ERR_PTR(-ENODEV);
2175
2176                 vport = ovs_vport_ovsl_rcu(dp, port_no);
2177                 if (!vport)
2178                         return ERR_PTR(-ENODEV);
2179                 return vport;
2180         } else {
2181                 return ERR_PTR(-EINVAL);
2182         }
2183 }
2184
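/* Return the largest forwarding headroom needed by any device attached to
 * this datapath. The RCU list walk is also valid under ovs_mutex, which is
 * what the lockdep_ovsl_is_held() annotation asserts.
 */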
2185 static unsigned int ovs_get_max_headroom(struct datapath *dp)
2186 {
2187         unsigned int dev_headroom, max_headroom = 0;
2188         struct net_device *dev;
2189         struct vport *vport;
2190         int i;
2191
2192         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2193                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
2194                                          lockdep_ovsl_is_held()) {
2195                         dev = vport->dev;
2196                         dev_headroom = netdev_get_fwd_headroom(dev);
2197                         if (dev_headroom > max_headroom)
2198                                 max_headroom = dev_headroom;
2199                 }
2200         }
2201
2202         return max_headroom;
2203 }
2204
2205 /* Called with ovs_mutex. */
2206 static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
2207 {
2208         struct vport *vport;
2209         int i;
2210
2211         dp->max_headroom = new_headroom;
2212         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2213                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
2214                                          lockdep_ovsl_is_held())
2215                         netdev_set_rx_headroom(vport->dev, new_headroom);
2216         }
2217 }
2218
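/* OVS_VPORT_CMD_NEW: attach a new port to a datapath. If userspace supplied
 * OVS_VPORT_ATTR_PORT_NO, that exact slot must be free; otherwise the first
 * free port number above 0 is picked. new_vport() may return -EAGAIN,
 * typically after a vport type's module had to be loaded, in which case the
 * whole lookup is restarted.
 */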
2219 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
2220 {
2221         struct nlattr **a = info->attrs;
2222         struct ovs_header *ovs_header = info->userhdr;
2223         struct vport_parms parms;
2224         struct sk_buff *reply;
2225         struct vport *vport;
2226         struct datapath *dp;
2227         unsigned int new_headroom;
2228         u32 port_no;
2229         int err;
2230
2231         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2232             !a[OVS_VPORT_ATTR_UPCALL_PID])
2233                 return -EINVAL;
2234
2235         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2236
2237         if (a[OVS_VPORT_ATTR_IFINDEX] && parms.type != OVS_VPORT_TYPE_INTERNAL)
2238                 return -EOPNOTSUPP;
2239
2240         port_no = a[OVS_VPORT_ATTR_PORT_NO]
2241                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2242         if (port_no >= DP_MAX_PORTS)
2243                 return -EFBIG;
2244
2245         reply = ovs_vport_cmd_alloc_info();
2246         if (!reply)
2247                 return -ENOMEM;
2248
2249         ovs_lock();
2250 restart:
2251         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2252         err = -ENODEV;
2253         if (!dp)
2254                 goto exit_unlock_free;
2255
2256         if (port_no) {
2257                 vport = ovs_vport_ovsl(dp, port_no);
2258                 err = -EBUSY;
2259                 if (vport)
2260                         goto exit_unlock_free;
2261         } else {
2262                 for (port_no = 1; ; port_no++) {
2263                         if (port_no >= DP_MAX_PORTS) {
2264                                 err = -EFBIG;
2265                                 goto exit_unlock_free;
2266                         }
2267                         vport = ovs_vport_ovsl(dp, port_no);
2268                         if (!vport)
2269                                 break;
2270                 }
2271         }
2272
2273         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2274         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2275         parms.dp = dp;
2276         parms.port_no = port_no;
2277         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
2278         parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX]
2279                 ? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
2280
2281         vport = new_vport(&parms);
2282         err = PTR_ERR(vport);
2283         if (IS_ERR(vport)) {
2284                 if (err == -EAGAIN)
2285                         goto restart;
2286                 goto exit_unlock_free;
2287         }
2288
2289         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2290                                       info->snd_portid, info->snd_seq, 0,
2291                                       OVS_VPORT_CMD_NEW, GFP_KERNEL);
2292
2293         new_headroom = netdev_get_fwd_headroom(vport->dev);
2294
2295         if (new_headroom > dp->max_headroom)
2296                 ovs_update_headroom(dp, new_headroom);
2297         else
2298                 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2299
2300         BUG_ON(err < 0);
2301         ovs_unlock();
2302
2303         ovs_notify(&dp_vport_genl_family, reply, info);
2304         return 0;
2305
2306 exit_unlock_free:
2307         ovs_unlock();
2308         kfree_skb(reply);
2309         return err;
2310 }
2311
2312 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2313 {
2314         struct nlattr **a = info->attrs;
2315         struct sk_buff *reply;
2316         struct vport *vport;
2317         int err;
2318
2319         reply = ovs_vport_cmd_alloc_info();
2320         if (!reply)
2321                 return -ENOMEM;
2322
2323         ovs_lock();
2324         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2325         err = PTR_ERR(vport);
2326         if (IS_ERR(vport))
2327                 goto exit_unlock_free;
2328
2329         if (a[OVS_VPORT_ATTR_TYPE] &&
2330             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2331                 err = -EINVAL;
2332                 goto exit_unlock_free;
2333         }
2334
2335         if (a[OVS_VPORT_ATTR_OPTIONS]) {
2336                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2337                 if (err)
2338                         goto exit_unlock_free;
2339         }
2340
2341
2342         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2343                 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2344
2345                 err = ovs_vport_set_upcall_portids(vport, ids);
2346                 if (err)
2347                         goto exit_unlock_free;
2348         }
2349
2350         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2351                                       info->snd_portid, info->snd_seq, 0,
2352                                       OVS_VPORT_CMD_SET, GFP_KERNEL);
2353         BUG_ON(err < 0);
2354
2355         ovs_unlock();
2356         ovs_notify(&dp_vport_genl_family, reply, info);
2357         return 0;
2358
2359 exit_unlock_free:
2360         ovs_unlock();
2361         kfree_skb(reply);
2362         return err;
2363 }
2364
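/* OVS_VPORT_CMD_DEL: OVSP_LOCAL can only go away together with the datapath
 * itself, so deleting it is rejected. If the departing device is the one
 * that dictated dp->max_headroom, the remaining ports are rescanned so the
 * shared headroom can shrink again.
 */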
2365 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2366 {
2367         bool update_headroom = false;
2368         struct nlattr **a = info->attrs;
2369         struct sk_buff *reply;
2370         struct datapath *dp;
2371         struct vport *vport;
2372         unsigned int new_headroom;
2373         int err;
2374
2375         reply = ovs_vport_cmd_alloc_info();
2376         if (!reply)
2377                 return -ENOMEM;
2378
2379         ovs_lock();
2380         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2381         err = PTR_ERR(vport);
2382         if (IS_ERR(vport))
2383                 goto exit_unlock_free;
2384
2385         if (vport->port_no == OVSP_LOCAL) {
2386                 err = -EINVAL;
2387                 goto exit_unlock_free;
2388         }
2389
2390         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2391                                       info->snd_portid, info->snd_seq, 0,
2392                                       OVS_VPORT_CMD_DEL, GFP_KERNEL);
2393         BUG_ON(err < 0);
2394
2395         /* The vport deletion may trigger a dp headroom update. */
2396         dp = vport->dp;
2397         if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2398                 update_headroom = true;
2399
2400         netdev_reset_rx_headroom(vport->dev);
2401         ovs_dp_detach_port(vport);
2402
2403         if (update_headroom) {
2404                 new_headroom = ovs_get_max_headroom(dp);
2405
2406                 if (new_headroom < dp->max_headroom)
2407                         ovs_update_headroom(dp, new_headroom);
2408         }
2409         ovs_unlock();
2410
2411         ovs_notify(&dp_vport_genl_family, reply, info);
2412         return 0;
2413
2414 exit_unlock_free:
2415         ovs_unlock();
2416         kfree_skb(reply);
2417         return err;
2418 }
2419
2420 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2421 {
2422         struct nlattr **a = info->attrs;
2423         struct ovs_header *ovs_header = info->userhdr;
2424         struct sk_buff *reply;
2425         struct vport *vport;
2426         int err;
2427
2428         reply = ovs_vport_cmd_alloc_info();
2429         if (!reply)
2430                 return -ENOMEM;
2431
2432         rcu_read_lock();
2433         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2434         err = PTR_ERR(vport);
2435         if (IS_ERR(vport))
2436                 goto exit_unlock_free;
2437         err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2438                                       info->snd_portid, info->snd_seq, 0,
2439                                       OVS_VPORT_CMD_GET, GFP_ATOMIC);
2440         BUG_ON(err < 0);
2441         rcu_read_unlock();
2442
2443         return genlmsg_reply(reply, info);
2444
2445 exit_unlock_free:
2446         rcu_read_unlock();
2447         kfree_skb(reply);
2448         return err;
2449 }
2450
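/* Dump every vport in the datapath. The cursor is two-level: cb->args[0] is
 * the hash bucket and cb->args[1] the number of entries already emitted from
 * that bucket, mirroring the bucket/offset scheme of the flow dump above.
 */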
2451 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2452 {
2453         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2454         struct datapath *dp;
2455         int bucket = cb->args[0], skip = cb->args[1];
2456         int i, j = 0;
2457
2458         rcu_read_lock();
2459         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2460         if (!dp) {
2461                 rcu_read_unlock();
2462                 return -ENODEV;
2463         }
2464         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2465                 struct vport *vport;
2466
2467                 j = 0;
2468                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2469                         if (j >= skip &&
2470                             ovs_vport_cmd_fill_info(vport, skb,
2471                                                     sock_net(skb->sk),
2472                                                     NETLINK_CB(cb->skb).portid,
2473                                                     cb->nlh->nlmsg_seq,
2474                                                     NLM_F_MULTI,
2475                                                     OVS_VPORT_CMD_GET,
2476                                                     GFP_ATOMIC) < 0)
2477                                 goto out;
2478
2479                         j++;
2480                 }
2481                 skip = 0;
2482         }
2483 out:
2484         rcu_read_unlock();
2485
2486         cb->args[0] = i;
2487         cb->args[1] = j;
2488
2489         return skb->len;
2490 }
2491
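/* Periodic per-namespace worker: rebalance the flow-mask usage counters of
 * every datapath so that frequently hit masks are tried first on lookup,
 * then re-arm the work DP_MASKS_REBALANCE_INTERVAL milliseconds out.
 */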
2492 static void ovs_dp_masks_rebalance(struct work_struct *work)
2493 {
2494         struct ovs_net *ovs_net = container_of(work, struct ovs_net,
2495                                                masks_rebalance.work);
2496         struct datapath *dp;
2497
2498         ovs_lock();
2499
2500         list_for_each_entry(dp, &ovs_net->dps, list_node)
2501                 ovs_flow_masks_rebalance(&dp->table);
2502
2503         ovs_unlock();
2504
2505         schedule_delayed_work(&ovs_net->masks_rebalance,
2506                               msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
2507 }
2508
2509 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2510         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2511         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2512         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2513         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2514         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
2515         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2516         [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
2517         [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
2518 };
2519
2520 static const struct genl_small_ops dp_vport_genl_ops[] = {
2521         { .cmd = OVS_VPORT_CMD_NEW,
2522           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2523           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2524           .doit = ovs_vport_cmd_new
2525         },
2526         { .cmd = OVS_VPORT_CMD_DEL,
2527           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2528           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2529           .doit = ovs_vport_cmd_del
2530         },
2531         { .cmd = OVS_VPORT_CMD_GET,
2532           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2533           .flags = 0,               /* OK for unprivileged users. */
2534           .doit = ovs_vport_cmd_get,
2535           .dumpit = ovs_vport_cmd_dump
2536         },
2537         { .cmd = OVS_VPORT_CMD_SET,
2538           .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2539           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2540           .doit = ovs_vport_cmd_set,
2541         },
2542 };
2543
2544 struct genl_family dp_vport_genl_family __ro_after_init = {
2545         .hdrsize = sizeof(struct ovs_header),
2546         .name = OVS_VPORT_FAMILY,
2547         .version = OVS_VPORT_VERSION,
2548         .maxattr = OVS_VPORT_ATTR_MAX,
2549         .policy = vport_policy,
2550         .netnsok = true,
2551         .parallel_ops = true,
2552         .small_ops = dp_vport_genl_ops,
2553         .n_small_ops = ARRAY_SIZE(dp_vport_genl_ops),
2554         .resv_start_op = OVS_VPORT_CMD_SET + 1,
2555         .mcgrps = &ovs_dp_vport_multicast_group,
2556         .n_mcgrps = 1,
2557         .module = THIS_MODULE,
2558 };
2559
2560 static struct genl_family * const dp_genl_families[] = {
2561         &dp_datapath_genl_family,
2562         &dp_vport_genl_family,
2563         &dp_flow_genl_family,
2564         &dp_packet_genl_family,
2565         &dp_meter_genl_family,
2566 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2567         &dp_ct_limit_genl_family,
2568 #endif
2569 };
2570
2571 static void dp_unregister_genl(int n_families)
2572 {
2573         int i;
2574
2575         for (i = 0; i < n_families; i++)
2576                 genl_unregister_family(dp_genl_families[i]);
2577 }
2578
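/* Register every family in dp_genl_families[]. On failure only the i
 * families registered so far are torn down again.
 */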
2579 static int __init dp_register_genl(void)
2580 {
2581         int err;
2582         int i;
2583
2584         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2585
2586                 err = genl_register_family(dp_genl_families[i]);
2587                 if (err)
2588                         goto error;
2589         }
2590
2591         return 0;
2592
2593 error:
2594         dp_unregister_genl(i);
2595         return err;
2596 }
2597
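/* Per-namespace init: set up the datapath list and the notify/rebalance
 * workers, initialize conntrack support, and kick off the first mask
 * rebalance pass.
 */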
2598 static int __net_init ovs_init_net(struct net *net)
2599 {
2600         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2601         int err;
2602
2603         INIT_LIST_HEAD(&ovs_net->dps);
2604         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2605         INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance);
2606
2607         err = ovs_ct_init(net);
2608         if (err)
2609                 return err;
2610
2611         schedule_delayed_work(&ovs_net->masks_rebalance,
2612                               msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
2613         return 0;
2614 }
2615
2616 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2617                                             struct list_head *head)
2618 {
2619         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2620         struct datapath *dp;
2621
2622         list_for_each_entry(dp, &ovs_net->dps, list_node) {
2623                 int i;
2624
2625                 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2626                         struct vport *vport;
2627
2628                         hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2629                                 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2630                                         continue;
2631
2632                                 if (dev_net(vport->dev) == dnet)
2633                                         list_add(&vport->detach_list, head);
2634                         }
2635                 }
2636         }
2637 }
2638
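/* Per-namespace teardown: destroy every datapath in the exiting namespace,
 * then walk all other namespaces and detach any internal vport whose
 * underlying device lives in the dying namespace (collected by
 * list_vports_from_net() above), and finally flush the per-netns workers.
 */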
2639 static void __net_exit ovs_exit_net(struct net *dnet)
2640 {
2641         struct datapath *dp, *dp_next;
2642         struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2643         struct vport *vport, *vport_next;
2644         struct net *net;
2645         LIST_HEAD(head);
2646
2647         ovs_lock();
2648
2649         ovs_ct_exit(dnet);
2650
2651         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2652                 __dp_destroy(dp);
2653
2654         down_read(&net_rwsem);
2655         for_each_net(net)
2656                 list_vports_from_net(net, dnet, &head);
2657         up_read(&net_rwsem);
2658
2659         /* Detach all vports from given namespace. */
2660         list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2661                 list_del(&vport->detach_list);
2662                 ovs_dp_detach_port(vport);
2663         }
2664
2665         ovs_unlock();
2666
2667         cancel_delayed_work_sync(&ovs_net->masks_rebalance);
2668         cancel_work_sync(&ovs_net->dp_notify_work);
2669 }
2670
2671 static struct pernet_operations ovs_net_ops = {
2672         .init = ovs_init_net,
2673         .exit = ovs_exit_net,
2674         .id   = &ovs_net_id,
2675         .size = sizeof(struct ovs_net),
2676 };
2677
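/* Module init: the BUILD_BUG_ON() guarantees the OVS per-packet control
 * block fits into skb->cb. Everything else is registered in dependency
 * order and unwound in reverse on failure, mirroring dp_cleanup().
 */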
2678 static int __init dp_init(void)
2679 {
2680         int err;
2681
2682         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
2683                      sizeof_field(struct sk_buff, cb));
2684
2685         pr_info("Open vSwitch switching datapath\n");
2686
2687         err = action_fifos_init();
2688         if (err)
2689                 goto error;
2690
2691         err = ovs_internal_dev_rtnl_link_register();
2692         if (err)
2693                 goto error_action_fifos_exit;
2694
2695         err = ovs_flow_init();
2696         if (err)
2697                 goto error_unreg_rtnl_link;
2698
2699         err = ovs_vport_init();
2700         if (err)
2701                 goto error_flow_exit;
2702
2703         err = register_pernet_device(&ovs_net_ops);
2704         if (err)
2705                 goto error_vport_exit;
2706
2707         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2708         if (err)
2709                 goto error_netns_exit;
2710
2711         err = ovs_netdev_init();
2712         if (err)
2713                 goto error_unreg_notifier;
2714
2715         err = dp_register_genl();
2716         if (err < 0)
2717                 goto error_unreg_netdev;
2718
2719         return 0;
2720
2721 error_unreg_netdev:
2722         ovs_netdev_exit();
2723 error_unreg_notifier:
2724         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2725 error_netns_exit:
2726         unregister_pernet_device(&ovs_net_ops);
2727 error_vport_exit:
2728         ovs_vport_exit();
2729 error_flow_exit:
2730         ovs_flow_exit();
2731 error_unreg_rtnl_link:
2732         ovs_internal_dev_rtnl_link_unregister();
2733 error_action_fifos_exit:
2734         action_fifos_exit();
2735 error:
2736         return err;
2737 }
2738
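/* Module exit: the mirror image of dp_init(). The rcu_barrier() waits for
 * outstanding destroy_dp_rcu() callbacks to finish before the code and data
 * they rely on are unloaded.
 */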
2739 static void dp_cleanup(void)
2740 {
2741         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2742         ovs_netdev_exit();
2743         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2744         unregister_pernet_device(&ovs_net_ops);
2745         rcu_barrier();
2746         ovs_vport_exit();
2747         ovs_flow_exit();
2748         ovs_internal_dev_rtnl_link_unregister();
2749         action_fifos_exit();
2750 }
2751
2752 module_init(dp_init);
2753 module_exit(dp_cleanup);
2754
2755 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2756 MODULE_LICENSE("GPL");
2757 MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2758 MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2759 MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2760 MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
2761 MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
2762 MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);