net/core/flow_dissector.c
1 #include <linux/kernel.h>
2 #include <linux/skbuff.h>
3 #include <linux/export.h>
4 #include <linux/ip.h>
5 #include <linux/ipv6.h>
6 #include <linux/if_vlan.h>
7 #include <net/dsa.h>
8 #include <net/dst_metadata.h>
9 #include <net/ip.h>
10 #include <net/ipv6.h>
11 #include <net/gre.h>
12 #include <net/pptp.h>
13 #include <net/tipc.h>
14 #include <linux/igmp.h>
15 #include <linux/icmp.h>
16 #include <linux/sctp.h>
17 #include <linux/dccp.h>
18 #include <linux/if_tunnel.h>
19 #include <linux/if_pppox.h>
20 #include <linux/ppp_defs.h>
21 #include <linux/stddef.h>
22 #include <linux/if_ether.h>
23 #include <linux/mpls.h>
24 #include <linux/tcp.h>
25 #include <net/flow_dissector.h>
26 #include <scsi/fc/fc_fcoe.h>
27 #include <uapi/linux/batadv_packet.h>
28
29 static void dissector_set_key(struct flow_dissector *flow_dissector,
30                               enum flow_dissector_key_id key_id)
31 {
32         flow_dissector->used_keys |= (1 << key_id);
33 }
34
35 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
36                              const struct flow_dissector_key *key,
37                              unsigned int key_count)
38 {
39         unsigned int i;
40
41         memset(flow_dissector, 0, sizeof(*flow_dissector));
42
43         for (i = 0; i < key_count; i++, key++) {
44                 /* User should make sure that every key target offset is within
45                  * the boundaries of unsigned short.
46                  */
47                 BUG_ON(key->offset > USHRT_MAX);
48                 BUG_ON(dissector_uses_key(flow_dissector,
49                                           key->key_id));
50
51                 dissector_set_key(flow_dissector, key->key_id);
52                 flow_dissector->offset[key->key_id] = key->offset;
53         }
54
55         /* Ensure that the dissector always includes the control and basic keys.
56          * That way we can avoid handling their absence in the fast path.
57          */
58         BUG_ON(!dissector_uses_key(flow_dissector,
59                                    FLOW_DISSECTOR_KEY_CONTROL));
60         BUG_ON(!dissector_uses_key(flow_dissector,
61                                    FLOW_DISSECTOR_KEY_BASIC));
62 }
63 EXPORT_SYMBOL(skb_flow_dissector_init);
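
/* Editor's illustrative sketch (not part of the original file, not compiled):
 * a minimal caller of skb_flow_dissector_init().  The key list names the
 * dissector keys of interest and the offset at which each dissected key is
 * written inside the caller's container; struct flow_keys is reused here
 * purely for illustration, and all "example_*" identifiers are hypothetical.
 */
#if 0
static const struct flow_dissector_key example_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,	/* required by init */
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,	/* required by init */
		.offset = offsetof(struct flow_keys, basic),
	},
};

static struct flow_dissector example_dissector __read_mostly;

static void example_dissector_setup(void)
{
	skb_flow_dissector_init(&example_dissector, example_keys,
				ARRAY_SIZE(example_keys));
}
#endif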
64
65 /**
66  * skb_flow_get_be16 - extract be16 entity
67  * @skb: sk_buff to extract from
68  * @poff: offset to extract at
69  * @data: raw buffer pointer to the packet
70  * @hlen: packet header length
71  *
72  * The function will try to retrieve a be16 entity at
73  * offset poff
74  */
75 static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
76                                 void *data, int hlen)
77 {
78         __be16 *u, _u;
79
80         u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
81         if (u)
82                 return *u;
83
84         return 0;
85 }
86
87 /**
88  * __skb_flow_get_ports - extract the upper layer ports and return them
89  * @skb: sk_buff to extract the ports from
90  * @thoff: transport header offset
91  * @ip_proto: protocol for which to get port offset
92  * @data: raw buffer pointer to the packet, if NULL use skb->data
93  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
94  *
95  * The function will try to retrieve the ports at offset thoff + poff where poff
96  * is the protocol port offset returned from proto_ports_offset
97  */
98 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
99                             void *data, int hlen)
100 {
101         int poff = proto_ports_offset(ip_proto);
102
103         if (!data) {
104                 data = skb->data;
105                 hlen = skb_headlen(skb);
106         }
107
108         if (poff >= 0) {
109                 __be32 *ports, _ports;
110
111                 ports = __skb_header_pointer(skb, thoff + poff,
112                                              sizeof(_ports), data, hlen, &_ports);
113                 if (ports)
114                         return *ports;
115         }
116
117         return 0;
118 }
119 EXPORT_SYMBOL(__skb_flow_get_ports);
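
/* Editor's illustrative sketch (not compiled): the __be32 returned by
 * __skb_flow_get_ports() packs the source and destination ports as they
 * appear on the wire, so it maps directly onto the union inside
 * struct flow_dissector_key_ports.  "example_print_ports" is hypothetical.
 */
#if 0
static void example_print_ports(const struct sk_buff *skb, int thoff,
				u8 ip_proto)
{
	struct flow_dissector_key_ports ports;

	/* data == NULL means use skb->data and skb_headlen(skb) */
	ports.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
	pr_debug("src port %u, dst port %u\n",
		 ntohs(ports.src), ntohs(ports.dst));
}
#endif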
120
121 static void
122 skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
123                                    struct flow_dissector *flow_dissector,
124                                    void *target_container)
125 {
126         struct flow_dissector_key_control *ctrl;
127
128         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
129                 return;
130
131         ctrl = skb_flow_dissector_target(flow_dissector,
132                                          FLOW_DISSECTOR_KEY_ENC_CONTROL,
133                                          target_container);
134         ctrl->addr_type = type;
135 }
136
137 void
138 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
139                              struct flow_dissector *flow_dissector,
140                              void *target_container)
141 {
142         struct ip_tunnel_info *info;
143         struct ip_tunnel_key *key;
144
145         /* A quick check to see if there might be something to do. */
146         if (!dissector_uses_key(flow_dissector,
147                                 FLOW_DISSECTOR_KEY_ENC_KEYID) &&
148             !dissector_uses_key(flow_dissector,
149                                 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
150             !dissector_uses_key(flow_dissector,
151                                 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
152             !dissector_uses_key(flow_dissector,
153                                 FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
154             !dissector_uses_key(flow_dissector,
155                                 FLOW_DISSECTOR_KEY_ENC_PORTS) &&
156             !dissector_uses_key(flow_dissector,
157                                 FLOW_DISSECTOR_KEY_ENC_IP) &&
158             !dissector_uses_key(flow_dissector,
159                                 FLOW_DISSECTOR_KEY_ENC_OPTS))
160                 return;
161
162         info = skb_tunnel_info(skb);
163         if (!info)
164                 return;
165
166         key = &info->key;
167
168         switch (ip_tunnel_info_af(info)) {
169         case AF_INET:
170                 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
171                                                    flow_dissector,
172                                                    target_container);
173                 if (dissector_uses_key(flow_dissector,
174                                        FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
175                         struct flow_dissector_key_ipv4_addrs *ipv4;
176
177                         ipv4 = skb_flow_dissector_target(flow_dissector,
178                                                          FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
179                                                          target_container);
180                         ipv4->src = key->u.ipv4.src;
181                         ipv4->dst = key->u.ipv4.dst;
182                 }
183                 break;
184         case AF_INET6:
185                 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
186                                                    flow_dissector,
187                                                    target_container);
188                 if (dissector_uses_key(flow_dissector,
189                                        FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
190                         struct flow_dissector_key_ipv6_addrs *ipv6;
191
192                         ipv6 = skb_flow_dissector_target(flow_dissector,
193                                                          FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
194                                                          target_container);
195                         ipv6->src = key->u.ipv6.src;
196                         ipv6->dst = key->u.ipv6.dst;
197                 }
198                 break;
199         }
200
201         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
202                 struct flow_dissector_key_keyid *keyid;
203
204                 keyid = skb_flow_dissector_target(flow_dissector,
205                                                   FLOW_DISSECTOR_KEY_ENC_KEYID,
206                                                   target_container);
207                 keyid->keyid = tunnel_id_to_key32(key->tun_id);
208         }
209
210         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
211                 struct flow_dissector_key_ports *tp;
212
213                 tp = skb_flow_dissector_target(flow_dissector,
214                                                FLOW_DISSECTOR_KEY_ENC_PORTS,
215                                                target_container);
216                 tp->src = key->tp_src;
217                 tp->dst = key->tp_dst;
218         }
219
220         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
221                 struct flow_dissector_key_ip *ip;
222
223                 ip = skb_flow_dissector_target(flow_dissector,
224                                                FLOW_DISSECTOR_KEY_ENC_IP,
225                                                target_container);
226                 ip->tos = key->tos;
227                 ip->ttl = key->ttl;
228         }
229
230         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
231                 struct flow_dissector_key_enc_opts *enc_opt;
232
233                 enc_opt = skb_flow_dissector_target(flow_dissector,
234                                                     FLOW_DISSECTOR_KEY_ENC_OPTS,
235                                                     target_container);
236
237                 if (info->options_len) {
238                         enc_opt->len = info->options_len;
239                         ip_tunnel_info_opts_get(enc_opt->data, info);
240                         enc_opt->dst_opt_type = info->key.tun_flags &
241                                                 TUNNEL_OPTIONS_PRESENT;
242                 }
243         }
244 }
245 EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
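
/* Editor's illustrative sketch (not compiled): a hypothetical container and
 * key list for a caller that wants the tunnel key ID in addition to the
 * mandatory control and basic keys.  With such a dissector,
 * skb_flow_dissect_tunnel_info() fills enc_keyid from the skb's tunnel
 * metadata.  All "example_*" names are hypothetical.
 */
#if 0
struct example_tunnel_container {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_keyid enc_keyid;
};

static const struct flow_dissector_key example_tunnel_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct example_tunnel_container, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct example_tunnel_container, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ENC_KEYID,
		.offset = offsetof(struct example_tunnel_container, enc_keyid),
	},
};
#endif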
246
247 static enum flow_dissect_ret
248 __skb_flow_dissect_mpls(const struct sk_buff *skb,
249                         struct flow_dissector *flow_dissector,
250                         void *target_container, void *data, int nhoff, int hlen)
251 {
252         struct flow_dissector_key_keyid *key_keyid;
253         struct mpls_label *hdr, _hdr[2];
254         u32 entry, label;
255
256         if (!dissector_uses_key(flow_dissector,
257                                 FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
258             !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
259                 return FLOW_DISSECT_RET_OUT_GOOD;
260
261         hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
262                                    hlen, &_hdr);
263         if (!hdr)
264                 return FLOW_DISSECT_RET_OUT_BAD;
265
266         entry = ntohl(hdr[0].entry);
267         label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
268
269         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
270                 struct flow_dissector_key_mpls *key_mpls;
271
272                 key_mpls = skb_flow_dissector_target(flow_dissector,
273                                                      FLOW_DISSECTOR_KEY_MPLS,
274                                                      target_container);
275                 key_mpls->mpls_label = label;
276                 key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
277                                         >> MPLS_LS_TTL_SHIFT;
278                 key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
279                                         >> MPLS_LS_TC_SHIFT;
280                 key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
281                                         >> MPLS_LS_S_SHIFT;
282         }
283
284         if (label == MPLS_LABEL_ENTROPY) {
285                 key_keyid = skb_flow_dissector_target(flow_dissector,
286                                                       FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
287                                                       target_container);
288                 key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
289         }
290         return FLOW_DISSECT_RET_OUT_GOOD;
291 }
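
/* Editor's illustrative sketch (not compiled): an MPLS label stack entry is
 * a single 32-bit word laid out as label(20) | TC(3) | S(1) | TTL(8), which
 * is exactly what the mask/shift pairs above unpack.  For example, the
 * host-order entry 0x00010140 decodes to label 16, TC 0, bottom-of-stack 1,
 * TTL 64.  "example_decode_mpls_entry" is hypothetical.
 */
#if 0
static void example_decode_mpls_entry(u32 entry)
{
	u32 label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
	u32 tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
	u32 bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
	u32 ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;

	pr_debug("label %u tc %u bos %u ttl %u\n", label, tc, bos, ttl);
}
#endif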
292
293 static enum flow_dissect_ret
294 __skb_flow_dissect_arp(const struct sk_buff *skb,
295                        struct flow_dissector *flow_dissector,
296                        void *target_container, void *data, int nhoff, int hlen)
297 {
298         struct flow_dissector_key_arp *key_arp;
299         struct {
300                 unsigned char ar_sha[ETH_ALEN];
301                 unsigned char ar_sip[4];
302                 unsigned char ar_tha[ETH_ALEN];
303                 unsigned char ar_tip[4];
304         } *arp_eth, _arp_eth;
305         const struct arphdr *arp;
306         struct arphdr _arp;
307
308         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
309                 return FLOW_DISSECT_RET_OUT_GOOD;
310
311         arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
312                                    hlen, &_arp);
313         if (!arp)
314                 return FLOW_DISSECT_RET_OUT_BAD;
315
316         if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
317             arp->ar_pro != htons(ETH_P_IP) ||
318             arp->ar_hln != ETH_ALEN ||
319             arp->ar_pln != 4 ||
320             (arp->ar_op != htons(ARPOP_REPLY) &&
321              arp->ar_op != htons(ARPOP_REQUEST)))
322                 return FLOW_DISSECT_RET_OUT_BAD;
323
324         arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
325                                        sizeof(_arp_eth), data,
326                                        hlen, &_arp_eth);
327         if (!arp_eth)
328                 return FLOW_DISSECT_RET_OUT_BAD;
329
330         key_arp = skb_flow_dissector_target(flow_dissector,
331                                             FLOW_DISSECTOR_KEY_ARP,
332                                             target_container);
333
334         memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
335         memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
336
337         /* Only store the lower byte of the opcode;
338          * this covers ARPOP_REPLY and ARPOP_REQUEST.
339          */
340         key_arp->op = ntohs(arp->ar_op) & 0xff;
341
342         ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
343         ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
344
345         return FLOW_DISSECT_RET_OUT_GOOD;
346 }
347
348 static enum flow_dissect_ret
349 __skb_flow_dissect_gre(const struct sk_buff *skb,
350                        struct flow_dissector_key_control *key_control,
351                        struct flow_dissector *flow_dissector,
352                        void *target_container, void *data,
353                        __be16 *p_proto, int *p_nhoff, int *p_hlen,
354                        unsigned int flags)
355 {
356         struct flow_dissector_key_keyid *key_keyid;
357         struct gre_base_hdr *hdr, _hdr;
358         int offset = 0;
359         u16 gre_ver;
360
361         hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
362                                    data, *p_hlen, &_hdr);
363         if (!hdr)
364                 return FLOW_DISSECT_RET_OUT_BAD;
365
366         /* Only look inside GRE without routing */
367         if (hdr->flags & GRE_ROUTING)
368                 return FLOW_DISSECT_RET_OUT_GOOD;
369
370         /* Only look inside GRE for versions 0 and 1 */
371         gre_ver = ntohs(hdr->flags & GRE_VERSION);
372         if (gre_ver > 1)
373                 return FLOW_DISSECT_RET_OUT_GOOD;
374
375         *p_proto = hdr->protocol;
376         if (gre_ver) {
377                 /* Version 1 must be PPTP; also check the flags */
378                 if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
379                         return FLOW_DISSECT_RET_OUT_GOOD;
380         }
381
382         offset += sizeof(struct gre_base_hdr);
383
384         if (hdr->flags & GRE_CSUM)
385                 offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
386                           sizeof(((struct gre_full_hdr *) 0)->reserved1);
387
388         if (hdr->flags & GRE_KEY) {
389                 const __be32 *keyid;
390                 __be32 _keyid;
391
392                 keyid = __skb_header_pointer(skb, *p_nhoff + offset,
393                                              sizeof(_keyid),
394                                              data, *p_hlen, &_keyid);
395                 if (!keyid)
396                         return FLOW_DISSECT_RET_OUT_BAD;
397
398                 if (dissector_uses_key(flow_dissector,
399                                        FLOW_DISSECTOR_KEY_GRE_KEYID)) {
400                         key_keyid = skb_flow_dissector_target(flow_dissector,
401                                                               FLOW_DISSECTOR_KEY_GRE_KEYID,
402                                                               target_container);
403                         if (gre_ver == 0)
404                                 key_keyid->keyid = *keyid;
405                         else
406                                 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
407                 }
408                 offset += sizeof(((struct gre_full_hdr *) 0)->key);
409         }
410
411         if (hdr->flags & GRE_SEQ)
412                 offset += sizeof(((struct pptp_gre_header *) 0)->seq);
413
414         if (gre_ver == 0) {
415                 if (*p_proto == htons(ETH_P_TEB)) {
416                         const struct ethhdr *eth;
417                         struct ethhdr _eth;
418
419                         eth = __skb_header_pointer(skb, *p_nhoff + offset,
420                                                    sizeof(_eth),
421                                                    data, *p_hlen, &_eth);
422                         if (!eth)
423                                 return FLOW_DISSECT_RET_OUT_BAD;
424                         *p_proto = eth->h_proto;
425                         offset += sizeof(*eth);
426
427                         /* Cap headers that we access via pointers at the
428                          * end of the Ethernet header as our maximum alignment
429                          * at that point is only 2 bytes.
430                          */
431                         if (NET_IP_ALIGN)
432                                 *p_hlen = *p_nhoff + offset;
433                 }
434         } else { /* version 1, must be PPTP */
435                 u8 _ppp_hdr[PPP_HDRLEN];
436                 u8 *ppp_hdr;
437
438                 if (hdr->flags & GRE_ACK)
439                         offset += sizeof(((struct pptp_gre_header *) 0)->ack);
440
441                 ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
442                                                sizeof(_ppp_hdr),
443                                                data, *p_hlen, _ppp_hdr);
444                 if (!ppp_hdr)
445                         return FLOW_DISSECT_RET_OUT_BAD;
446
447                 switch (PPP_PROTOCOL(ppp_hdr)) {
448                 case PPP_IP:
449                         *p_proto = htons(ETH_P_IP);
450                         break;
451                 case PPP_IPV6:
452                         *p_proto = htons(ETH_P_IPV6);
453                         break;
454                 default:
455                         /* Could probably catch some more like MPLS */
456                         break;
457                 }
458
459                 offset += PPP_HDRLEN;
460         }
461
462         *p_nhoff += offset;
463         key_control->flags |= FLOW_DIS_ENCAPSULATION;
464         if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
465                 return FLOW_DISSECT_RET_OUT_GOOD;
466
467         return FLOW_DISSECT_RET_PROTO_AGAIN;
468 }
469
470 /**
471  * __skb_flow_dissect_batadv() - dissect batman-adv header
472  * @skb: sk_buff with the batman-adv header
473  * @key_control: flow dissector's control key
474  * @data: raw buffer pointer to the packet, if NULL use skb->data
475  * @p_proto: pointer used to update the protocol to process next
476  * @p_nhoff: pointer used to update inner network header offset
477  * @hlen: packet header length
478  * @flags: any combination of FLOW_DISSECTOR_F_*
479  *
480  * An attempt is made to dissect ETH_P_BATMAN packets. Only
481  * &struct batadv_unicast packets are actually processed, because they contain
482  * an inner ethernet header and are usually followed by an actual network
483  * header. This allows the flow dissector to continue processing the packet.
484  *
485  * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
486  *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
487  *  otherwise FLOW_DISSECT_RET_OUT_BAD
488  */
489 static enum flow_dissect_ret
490 __skb_flow_dissect_batadv(const struct sk_buff *skb,
491                           struct flow_dissector_key_control *key_control,
492                           void *data, __be16 *p_proto, int *p_nhoff, int hlen,
493                           unsigned int flags)
494 {
495         struct {
496                 struct batadv_unicast_packet batadv_unicast;
497                 struct ethhdr eth;
498         } *hdr, _hdr;
499
500         hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
501                                    &_hdr);
502         if (!hdr)
503                 return FLOW_DISSECT_RET_OUT_BAD;
504
505         if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
506                 return FLOW_DISSECT_RET_OUT_BAD;
507
508         if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
509                 return FLOW_DISSECT_RET_OUT_BAD;
510
511         *p_proto = hdr->eth.h_proto;
512         *p_nhoff += sizeof(*hdr);
513
514         key_control->flags |= FLOW_DIS_ENCAPSULATION;
515         if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
516                 return FLOW_DISSECT_RET_OUT_GOOD;
517
518         return FLOW_DISSECT_RET_PROTO_AGAIN;
519 }
520
521 static void
522 __skb_flow_dissect_tcp(const struct sk_buff *skb,
523                        struct flow_dissector *flow_dissector,
524                        void *target_container, void *data, int thoff, int hlen)
525 {
526         struct flow_dissector_key_tcp *key_tcp;
527         struct tcphdr *th, _th;
528
529         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
530                 return;
531
532         th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
533         if (!th)
534                 return;
535
536         if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
537                 return;
538
539         key_tcp = skb_flow_dissector_target(flow_dissector,
540                                             FLOW_DISSECTOR_KEY_TCP,
541                                             target_container);
542         key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
543 }
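
/* Editor's illustrative sketch (not compiled): bytes 12-13 of the TCP header
 * hold doff(4) | reserved(3) | NS/flags(9), so masking the first 16 bits of
 * tcp_flag_word() with htons(0x0FFF) keeps only the reserved and flag bits,
 * which is what FLOW_DISSECTOR_KEY_TCP stores above.  "example_tcp_syn_only"
 * is hypothetical.
 */
#if 0
static bool example_tcp_syn_only(struct tcphdr *th)
{
	__be16 flags = *(__be16 *)&tcp_flag_word(th) & htons(0x0FFF);

	return flags == htons(0x0002);	/* only SYN set */
}
#endif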
544
545 static void
546 __skb_flow_dissect_ipv4(const struct sk_buff *skb,
547                         struct flow_dissector *flow_dissector,
548                         void *target_container, void *data, const struct iphdr *iph)
549 {
550         struct flow_dissector_key_ip *key_ip;
551
552         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
553                 return;
554
555         key_ip = skb_flow_dissector_target(flow_dissector,
556                                            FLOW_DISSECTOR_KEY_IP,
557                                            target_container);
558         key_ip->tos = iph->tos;
559         key_ip->ttl = iph->ttl;
560 }
561
562 static void
563 __skb_flow_dissect_ipv6(const struct sk_buff *skb,
564                         struct flow_dissector *flow_dissector,
565                         void *target_container, void *data, const struct ipv6hdr *iph)
566 {
567         struct flow_dissector_key_ip *key_ip;
568
569         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
570                 return;
571
572         key_ip = skb_flow_dissector_target(flow_dissector,
573                                            FLOW_DISSECTOR_KEY_IP,
574                                            target_container);
575         key_ip->tos = ipv6_get_dsfield(iph);
576         key_ip->ttl = iph->hop_limit;
577 }
578
579 /* Maximum number of protocol headers that can be parsed in
580  * __skb_flow_dissect
581  */
582 #define MAX_FLOW_DISSECT_HDRS   15
583
584 static bool skb_flow_dissect_allowed(int *num_hdrs)
585 {
586         ++*num_hdrs;
587
588         return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
589 }
590
591 /**
592  * __skb_flow_dissect - extract the flow_keys struct and return it
593  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
594  * @flow_dissector: list of keys to dissect
595  * @target_container: target structure to put dissected values into
596  * @data: raw buffer pointer to the packet, if NULL use skb->data
597  * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
598  * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
599  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
600  *
601  * The function will try to retrieve individual keys into the target container
602  * specified by flow_dissector, from either the skbuff or the raw buffer
603  * described by the remaining parameters.
604  *
605  * Caller must take care of zeroing target container memory.
606  */
607 bool __skb_flow_dissect(const struct sk_buff *skb,
608                         struct flow_dissector *flow_dissector,
609                         void *target_container,
610                         void *data, __be16 proto, int nhoff, int hlen,
611                         unsigned int flags)
612 {
613         struct flow_dissector_key_control *key_control;
614         struct flow_dissector_key_basic *key_basic;
615         struct flow_dissector_key_addrs *key_addrs;
616         struct flow_dissector_key_ports *key_ports;
617         struct flow_dissector_key_icmp *key_icmp;
618         struct flow_dissector_key_tags *key_tags;
619         struct flow_dissector_key_vlan *key_vlan;
620         enum flow_dissect_ret fdret;
621         enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
622         int num_hdrs = 0;
623         u8 ip_proto = 0;
624         bool ret;
625
626         if (!data) {
627                 data = skb->data;
628                 proto = skb_vlan_tag_present(skb) ?
629                          skb->vlan_proto : skb->protocol;
630                 nhoff = skb_network_offset(skb);
631                 hlen = skb_headlen(skb);
632 #if IS_ENABLED(CONFIG_NET_DSA)
633                 if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
634                         const struct dsa_device_ops *ops;
635                         int offset;
636
637                         ops = skb->dev->dsa_ptr->tag_ops;
638                         if (ops->flow_dissect &&
639                             !ops->flow_dissect(skb, &proto, &offset)) {
640                                 hlen -= offset;
641                                 nhoff += offset;
642                         }
643                 }
644 #endif
645         }
646
647         /* It is ensured by skb_flow_dissector_init() that the control key
648          * will always be present.
649          */
650         key_control = skb_flow_dissector_target(flow_dissector,
651                                                 FLOW_DISSECTOR_KEY_CONTROL,
652                                                 target_container);
653
654         /* It is ensured by skb_flow_dissector_init() that the basic key
655          * will always be present.
656          */
657         key_basic = skb_flow_dissector_target(flow_dissector,
658                                               FLOW_DISSECTOR_KEY_BASIC,
659                                               target_container);
660
661         if (dissector_uses_key(flow_dissector,
662                                FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
663                 struct ethhdr *eth = eth_hdr(skb);
664                 struct flow_dissector_key_eth_addrs *key_eth_addrs;
665
666                 key_eth_addrs = skb_flow_dissector_target(flow_dissector,
667                                                           FLOW_DISSECTOR_KEY_ETH_ADDRS,
668                                                           target_container);
669                 memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
670         }
671
672 proto_again:
673         fdret = FLOW_DISSECT_RET_CONTINUE;
674
675         switch (proto) {
676         case htons(ETH_P_IP): {
677                 const struct iphdr *iph;
678                 struct iphdr _iph;
679
680                 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
681                 if (!iph || iph->ihl < 5) {
682                         fdret = FLOW_DISSECT_RET_OUT_BAD;
683                         break;
684                 }
685
686                 nhoff += iph->ihl * 4;
687
688                 ip_proto = iph->protocol;
689
690                 if (dissector_uses_key(flow_dissector,
691                                        FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
692                         key_addrs = skb_flow_dissector_target(flow_dissector,
693                                                               FLOW_DISSECTOR_KEY_IPV4_ADDRS,
694                                                               target_container);
695
696                         memcpy(&key_addrs->v4addrs, &iph->saddr,
697                                sizeof(key_addrs->v4addrs));
698                         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
699                 }
700
701                 if (ip_is_fragment(iph)) {
702                         key_control->flags |= FLOW_DIS_IS_FRAGMENT;
703
704                         if (iph->frag_off & htons(IP_OFFSET)) {
705                                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
706                                 break;
707                         } else {
708                                 key_control->flags |= FLOW_DIS_FIRST_FRAG;
709                                 if (!(flags &
710                                       FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
711                                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
712                                         break;
713                                 }
714                         }
715                 }
716
717                 __skb_flow_dissect_ipv4(skb, flow_dissector,
718                                         target_container, data, iph);
719
720                 if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
721                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
722                         break;
723                 }
724
725                 break;
726         }
727         case htons(ETH_P_IPV6): {
728                 const struct ipv6hdr *iph;
729                 struct ipv6hdr _iph;
730
731                 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
732                 if (!iph) {
733                         fdret = FLOW_DISSECT_RET_OUT_BAD;
734                         break;
735                 }
736
737                 ip_proto = iph->nexthdr;
738                 nhoff += sizeof(struct ipv6hdr);
739
740                 if (dissector_uses_key(flow_dissector,
741                                        FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
742                         key_addrs = skb_flow_dissector_target(flow_dissector,
743                                                               FLOW_DISSECTOR_KEY_IPV6_ADDRS,
744                                                               target_container);
745
746                         memcpy(&key_addrs->v6addrs, &iph->saddr,
747                                sizeof(key_addrs->v6addrs));
748                         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
749                 }
750
751                 if ((dissector_uses_key(flow_dissector,
752                                         FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
753                      (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
754                     ip6_flowlabel(iph)) {
755                         __be32 flow_label = ip6_flowlabel(iph);
756
757                         if (dissector_uses_key(flow_dissector,
758                                                FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
759                                 key_tags = skb_flow_dissector_target(flow_dissector,
760                                                                      FLOW_DISSECTOR_KEY_FLOW_LABEL,
761                                                                      target_container);
762                                 key_tags->flow_label = ntohl(flow_label);
763                         }
764                         if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
765                                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
766                                 break;
767                         }
768                 }
769
770                 __skb_flow_dissect_ipv6(skb, flow_dissector,
771                                         target_container, data, iph);
772
773                 if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
774                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
775
776                 break;
777         }
778         case htons(ETH_P_8021AD):
779         case htons(ETH_P_8021Q): {
780                 const struct vlan_hdr *vlan = NULL;
781                 struct vlan_hdr _vlan;
782                 __be16 saved_vlan_tpid = proto;
783
784                 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
785                     skb && skb_vlan_tag_present(skb)) {
786                         proto = skb->protocol;
787                 } else {
788                         vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
789                                                     data, hlen, &_vlan);
790                         if (!vlan) {
791                                 fdret = FLOW_DISSECT_RET_OUT_BAD;
792                                 break;
793                         }
794
795                         proto = vlan->h_vlan_encapsulated_proto;
796                         nhoff += sizeof(*vlan);
797                 }
798
799                 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
800                         dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
801                 } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
802                         dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
803                 } else {
804                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
805                         break;
806                 }
807
808                 if (dissector_uses_key(flow_dissector, dissector_vlan)) {
809                         key_vlan = skb_flow_dissector_target(flow_dissector,
810                                                              dissector_vlan,
811                                                              target_container);
812
813                         if (!vlan) {
814                                 key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
815                                 key_vlan->vlan_priority =
816                                         (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
817                         } else {
818                                 key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
819                                         VLAN_VID_MASK;
820                                 key_vlan->vlan_priority =
821                                         (ntohs(vlan->h_vlan_TCI) &
822                                          VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
823                         }
824                         key_vlan->vlan_tpid = saved_vlan_tpid;
825                 }
826
827                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
828                 break;
829         }
830         case htons(ETH_P_PPP_SES): {
831                 struct {
832                         struct pppoe_hdr hdr;
833                         __be16 proto;
834                 } *hdr, _hdr;
835                 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
836                 if (!hdr) {
837                         fdret = FLOW_DISSECT_RET_OUT_BAD;
838                         break;
839                 }
840
841                 proto = hdr->proto;
842                 nhoff += PPPOE_SES_HLEN;
843                 switch (proto) {
844                 case htons(PPP_IP):
845                         proto = htons(ETH_P_IP);
846                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
847                         break;
848                 case htons(PPP_IPV6):
849                         proto = htons(ETH_P_IPV6);
850                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
851                         break;
852                 default:
853                         fdret = FLOW_DISSECT_RET_OUT_BAD;
854                         break;
855                 }
856                 break;
857         }
858         case htons(ETH_P_TIPC): {
859                 struct tipc_basic_hdr *hdr, _hdr;
860
861                 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
862                                            data, hlen, &_hdr);
863                 if (!hdr) {
864                         fdret = FLOW_DISSECT_RET_OUT_BAD;
865                         break;
866                 }
867
868                 if (dissector_uses_key(flow_dissector,
869                                        FLOW_DISSECTOR_KEY_TIPC)) {
870                         key_addrs = skb_flow_dissector_target(flow_dissector,
871                                                               FLOW_DISSECTOR_KEY_TIPC,
872                                                               target_container);
873                         key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
874                         key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
875                 }
876                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
877                 break;
878         }
879
880         case htons(ETH_P_MPLS_UC):
881         case htons(ETH_P_MPLS_MC):
882                 fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
883                                                 target_container, data,
884                                                 nhoff, hlen);
885                 break;
886         case htons(ETH_P_FCOE):
887                 if ((hlen - nhoff) < FCOE_HEADER_LEN) {
888                         fdret = FLOW_DISSECT_RET_OUT_BAD;
889                         break;
890                 }
891
892                 nhoff += FCOE_HEADER_LEN;
893                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
894                 break;
895
896         case htons(ETH_P_ARP):
897         case htons(ETH_P_RARP):
898                 fdret = __skb_flow_dissect_arp(skb, flow_dissector,
899                                                target_container, data,
900                                                nhoff, hlen);
901                 break;
902
903         case htons(ETH_P_BATMAN):
904                 fdret = __skb_flow_dissect_batadv(skb, key_control, data,
905                                                   &proto, &nhoff, hlen, flags);
906                 break;
907
908         default:
909                 fdret = FLOW_DISSECT_RET_OUT_BAD;
910                 break;
911         }
912
913         /* Process result of proto processing */
914         switch (fdret) {
915         case FLOW_DISSECT_RET_OUT_GOOD:
916                 goto out_good;
917         case FLOW_DISSECT_RET_PROTO_AGAIN:
918                 if (skb_flow_dissect_allowed(&num_hdrs))
919                         goto proto_again;
920                 goto out_good;
921         case FLOW_DISSECT_RET_CONTINUE:
922         case FLOW_DISSECT_RET_IPPROTO_AGAIN:
923                 break;
924         case FLOW_DISSECT_RET_OUT_BAD:
925         default:
926                 goto out_bad;
927         }
928
929 ip_proto_again:
930         fdret = FLOW_DISSECT_RET_CONTINUE;
931
932         switch (ip_proto) {
933         case IPPROTO_GRE:
934                 fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
935                                                target_container, data,
936                                                &proto, &nhoff, &hlen, flags);
937                 break;
938
939         case NEXTHDR_HOP:
940         case NEXTHDR_ROUTING:
941         case NEXTHDR_DEST: {
942                 u8 _opthdr[2], *opthdr;
943
944                 if (proto != htons(ETH_P_IPV6))
945                         break;
946
947                 opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
948                                               data, hlen, &_opthdr);
949                 if (!opthdr) {
950                         fdret = FLOW_DISSECT_RET_OUT_BAD;
951                         break;
952                 }
953
954                 ip_proto = opthdr[0];
955                 nhoff += (opthdr[1] + 1) << 3;
956
957                 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
958                 break;
959         }
960         case NEXTHDR_FRAGMENT: {
961                 struct frag_hdr _fh, *fh;
962
963                 if (proto != htons(ETH_P_IPV6))
964                         break;
965
966                 fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
967                                           data, hlen, &_fh);
968
969                 if (!fh) {
970                         fdret = FLOW_DISSECT_RET_OUT_BAD;
971                         break;
972                 }
973
974                 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
975
976                 nhoff += sizeof(_fh);
977                 ip_proto = fh->nexthdr;
978
979                 if (!(fh->frag_off & htons(IP6_OFFSET))) {
980                         key_control->flags |= FLOW_DIS_FIRST_FRAG;
981                         if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
982                                 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
983                                 break;
984                         }
985                 }
986
987                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
988                 break;
989         }
990         case IPPROTO_IPIP:
991                 proto = htons(ETH_P_IP);
992
993                 key_control->flags |= FLOW_DIS_ENCAPSULATION;
994                 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
995                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
996                         break;
997                 }
998
999                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1000                 break;
1001
1002         case IPPROTO_IPV6:
1003                 proto = htons(ETH_P_IPV6);
1004
1005                 key_control->flags |= FLOW_DIS_ENCAPSULATION;
1006                 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
1007                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
1008                         break;
1009                 }
1010
1011                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1012                 break;
1013
1014
1015         case IPPROTO_MPLS:
1016                 proto = htons(ETH_P_MPLS_UC);
1017                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1018                 break;
1019
1020         case IPPROTO_TCP:
1021                 __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
1022                                        data, nhoff, hlen);
1023                 break;
1024
1025         default:
1026                 break;
1027         }
1028
1029         if (dissector_uses_key(flow_dissector,
1030                                FLOW_DISSECTOR_KEY_PORTS)) {
1031                 key_ports = skb_flow_dissector_target(flow_dissector,
1032                                                       FLOW_DISSECTOR_KEY_PORTS,
1033                                                       target_container);
1034                 key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
1035                                                         data, hlen);
1036         }
1037
1038         if (dissector_uses_key(flow_dissector,
1039                                FLOW_DISSECTOR_KEY_ICMP)) {
1040                 key_icmp = skb_flow_dissector_target(flow_dissector,
1041                                                      FLOW_DISSECTOR_KEY_ICMP,
1042                                                      target_container);
1043                 key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
1044         }
1045
1046         /* Process result of IP proto processing */
1047         switch (fdret) {
1048         case FLOW_DISSECT_RET_PROTO_AGAIN:
1049                 if (skb_flow_dissect_allowed(&num_hdrs))
1050                         goto proto_again;
1051                 break;
1052         case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1053                 if (skb_flow_dissect_allowed(&num_hdrs))
1054                         goto ip_proto_again;
1055                 break;
1056         case FLOW_DISSECT_RET_OUT_GOOD:
1057         case FLOW_DISSECT_RET_CONTINUE:
1058                 break;
1059         case FLOW_DISSECT_RET_OUT_BAD:
1060         default:
1061                 goto out_bad;
1062         }
1063
1064 out_good:
1065         ret = true;
1066
1067 out:
1068         key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
1069         key_basic->n_proto = proto;
1070         key_basic->ip_proto = ip_proto;
1071
1072         return ret;
1073
1074 out_bad:
1075         ret = false;
1076         goto out;
1077 }
1078 EXPORT_SYMBOL(__skb_flow_dissect);
1079
1080 static u32 hashrnd __read_mostly;
1081 static __always_inline void __flow_hash_secret_init(void)
1082 {
1083         net_get_random_once(&hashrnd, sizeof(hashrnd));
1084 }
1085
1086 static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
1087                                              u32 keyval)
1088 {
1089         return jhash2(words, length, keyval);
1090 }
1091
1092 static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
1093 {
1094         const void *p = flow;
1095
1096         BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
1097         return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
1098 }
1099
1100 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
1101 {
1102         size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
1103         BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
1104         BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
1105                      sizeof(*flow) - sizeof(flow->addrs));
1106
1107         switch (flow->control.addr_type) {
1108         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1109                 diff -= sizeof(flow->addrs.v4addrs);
1110                 break;
1111         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1112                 diff -= sizeof(flow->addrs.v6addrs);
1113                 break;
1114         case FLOW_DISSECTOR_KEY_TIPC:
1115                 diff -= sizeof(flow->addrs.tipckey);
1116                 break;
1117         }
1118         return (sizeof(*flow) - diff) / sizeof(u32);
1119 }
1120
1121 __be32 flow_get_u32_src(const struct flow_keys *flow)
1122 {
1123         switch (flow->control.addr_type) {
1124         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1125                 return flow->addrs.v4addrs.src;
1126         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1127                 return (__force __be32)ipv6_addr_hash(
1128                         &flow->addrs.v6addrs.src);
1129         case FLOW_DISSECTOR_KEY_TIPC:
1130                 return flow->addrs.tipckey.key;
1131         default:
1132                 return 0;
1133         }
1134 }
1135 EXPORT_SYMBOL(flow_get_u32_src);
1136
1137 __be32 flow_get_u32_dst(const struct flow_keys *flow)
1138 {
1139         switch (flow->control.addr_type) {
1140         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1141                 return flow->addrs.v4addrs.dst;
1142         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1143                 return (__force __be32)ipv6_addr_hash(
1144                         &flow->addrs.v6addrs.dst);
1145         default:
1146                 return 0;
1147         }
1148 }
1149 EXPORT_SYMBOL(flow_get_u32_dst);
1150
1151 static inline void __flow_hash_consistentify(struct flow_keys *keys)
1152 {
1153         int addr_diff, i;
1154
1155         switch (keys->control.addr_type) {
1156         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1157                 addr_diff = (__force u32)keys->addrs.v4addrs.dst -
1158                             (__force u32)keys->addrs.v4addrs.src;
1159                 if ((addr_diff < 0) ||
1160                     (addr_diff == 0 &&
1161                      ((__force u16)keys->ports.dst <
1162                       (__force u16)keys->ports.src))) {
1163                         swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
1164                         swap(keys->ports.src, keys->ports.dst);
1165                 }
1166                 break;
1167         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1168                 addr_diff = memcmp(&keys->addrs.v6addrs.dst,
1169                                    &keys->addrs.v6addrs.src,
1170                                    sizeof(keys->addrs.v6addrs.dst));
1171                 if ((addr_diff < 0) ||
1172                     (addr_diff == 0 &&
1173                      ((__force u16)keys->ports.dst <
1174                       (__force u16)keys->ports.src))) {
1175                         for (i = 0; i < 4; i++)
1176                                 swap(keys->addrs.v6addrs.src.s6_addr32[i],
1177                                      keys->addrs.v6addrs.dst.s6_addr32[i]);
1178                         swap(keys->ports.src, keys->ports.dst);
1179                 }
1180                 break;
1181         }
1182 }
1183
1184 static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
1185 {
1186         u32 hash;
1187
1188         __flow_hash_consistentify(keys);
1189
1190         hash = __flow_hash_words(flow_keys_hash_start(keys),
1191                                  flow_keys_hash_length(keys), keyval);
1192         if (!hash)
1193                 hash = 1;
1194
1195         return hash;
1196 }
1197
1198 u32 flow_hash_from_keys(struct flow_keys *keys)
1199 {
1200         __flow_hash_secret_init();
1201         return __flow_hash_from_keys(keys, hashrnd);
1202 }
1203 EXPORT_SYMBOL(flow_hash_from_keys);
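
/* Editor's illustrative sketch (not compiled): because
 * __flow_hash_consistentify() orders the address/port pairs before hashing,
 * both directions of the same flow produce the same hash value.
 * "example_symmetric_hash" and the addresses used are hypothetical.
 */
#if 0
static void example_symmetric_hash(void)
{
	struct flow_keys a = {}, b;

	a.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	a.addrs.v4addrs.src = htonl(0xc0a80001);	/* 192.168.0.1 */
	a.addrs.v4addrs.dst = htonl(0xc0a80002);	/* 192.168.0.2 */
	a.ports.src = htons(12345);
	a.ports.dst = htons(80);

	/* Same flow, opposite direction */
	b = a;
	swap(b.addrs.v4addrs.src, b.addrs.v4addrs.dst);
	swap(b.ports.src, b.ports.dst);

	WARN_ON(flow_hash_from_keys(&a) != flow_hash_from_keys(&b));
}
#endif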
1204
1205 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
1206                                   struct flow_keys *keys, u32 keyval)
1207 {
1208         skb_flow_dissect_flow_keys(skb, keys,
1209                                    FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1210
1211         return __flow_hash_from_keys(keys, keyval);
1212 }
1213
1214 struct _flow_keys_digest_data {
1215         __be16  n_proto;
1216         u8      ip_proto;
1217         u8      padding;
1218         __be32  ports;
1219         __be32  src;
1220         __be32  dst;
1221 };
1222
1223 void make_flow_keys_digest(struct flow_keys_digest *digest,
1224                            const struct flow_keys *flow)
1225 {
1226         struct _flow_keys_digest_data *data =
1227             (struct _flow_keys_digest_data *)digest;
1228
1229         BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
1230
1231         memset(digest, 0, sizeof(*digest));
1232
1233         data->n_proto = flow->basic.n_proto;
1234         data->ip_proto = flow->basic.ip_proto;
1235         data->ports = flow->ports.ports;
1236         data->src = flow->addrs.v4addrs.src;
1237         data->dst = flow->addrs.v4addrs.dst;
1238 }
1239 EXPORT_SYMBOL(make_flow_keys_digest);
1240
1241 static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
1242
1243 u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
1244 {
1245         struct flow_keys keys;
1246
1247         __flow_hash_secret_init();
1248
1249         memset(&keys, 0, sizeof(keys));
1250         __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
1251                            NULL, 0, 0, 0,
1252                            FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1253
1254         return __flow_hash_from_keys(&keys, hashrnd);
1255 }
1256 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
1257
1258 /**
1259  * __skb_get_hash - calculate a flow hash
1260  * @skb: sk_buff to calculate flow hash from
1261  *
1262  * This function calculates a flow hash based on src/dst addresses
1263  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
1264  * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
1265  * if hash is a canonical 4-tuple hash over transport ports.
1266  */
1267 void __skb_get_hash(struct sk_buff *skb)
1268 {
1269         struct flow_keys keys;
1270         u32 hash;
1271
1272         __flow_hash_secret_init();
1273
1274         hash = ___skb_get_hash(skb, &keys, hashrnd);
1275
1276         __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1277 }
1278 EXPORT_SYMBOL(__skb_get_hash);
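
/* Editor's illustrative sketch (not compiled): most callers do not invoke
 * __skb_get_hash() directly; they use skb_get_hash() from linux/skbuff.h,
 * which only falls back to this software dissection when no valid hash is
 * already set on the skb.  "example_flow_hash" is hypothetical.
 */
#if 0
static u32 example_flow_hash(struct sk_buff *skb)
{
	return skb_get_hash(skb);	/* zero means no valid flow hash */
}
#endif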
1279
1280 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
1281 {
1282         struct flow_keys keys;
1283
1284         return ___skb_get_hash(skb, &keys, perturb);
1285 }
1286 EXPORT_SYMBOL(skb_get_hash_perturb);
1287
1288 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1289                    const struct flow_keys_basic *keys, int hlen)
1290 {
1291         u32 poff = keys->control.thoff;
1292
1293         /* skip L4 headers for fragments after the first */
1294         if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
1295             !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
1296                 return poff;
1297
1298         switch (keys->basic.ip_proto) {
1299         case IPPROTO_TCP: {
1300                 /* access doff as u8 to avoid unaligned access */
1301                 const u8 *doff;
1302                 u8 _doff;
1303
1304                 doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
1305                                             data, hlen, &_doff);
1306                 if (!doff)
1307                         return poff;
1308
1309                 poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
1310                 break;
1311         }
1312         case IPPROTO_UDP:
1313         case IPPROTO_UDPLITE:
1314                 poff += sizeof(struct udphdr);
1315                 break;
1316         /* For the rest, we do not really care about header
1317          * extensions at this point.
1318          */
1319         case IPPROTO_ICMP:
1320                 poff += sizeof(struct icmphdr);
1321                 break;
1322         case IPPROTO_ICMPV6:
1323                 poff += sizeof(struct icmp6hdr);
1324                 break;
1325         case IPPROTO_IGMP:
1326                 poff += sizeof(struct igmphdr);
1327                 break;
1328         case IPPROTO_DCCP:
1329                 poff += sizeof(struct dccp_hdr);
1330                 break;
1331         case IPPROTO_SCTP:
1332                 poff += sizeof(struct sctphdr);
1333                 break;
1334         }
1335
1336         return poff;
1337 }
1338
1339 /**
1340  * skb_get_poff - get the offset to the payload
1341  * @skb: sk_buff to get the payload offset from
1342  *
1343  * The function will get the offset to the payload as far as it could
1344  * be dissected.  The main user is currently BPF, so that we can dynamically
1345  * truncate packets without needing to push the actual payload to user
1346  * space, analyzing only the headers instead.
1347  */
1348 u32 skb_get_poff(const struct sk_buff *skb)
1349 {
1350         struct flow_keys_basic keys;
1351
1352         if (!skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
1353                 return 0;
1354
1355         return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
1356 }
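
/* Editor's illustrative sketch (not compiled): a caller that only cares
 * about headers can use the payload offset to cap how much of the packet it
 * keeps, e.g. when sampling packets for analysis.  "example_header_len" is
 * hypothetical.
 */
#if 0
static unsigned int example_header_len(const struct sk_buff *skb)
{
	u32 poff = skb_get_poff(skb);

	return poff ? min_t(u32, poff, skb->len) : skb->len;
}
#endif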
1357
1358 __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
1359 {
1360         memset(keys, 0, sizeof(*keys));
1361
1362         memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
1363             sizeof(keys->addrs.v6addrs.src));
1364         memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
1365             sizeof(keys->addrs.v6addrs.dst));
1366         keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1367         keys->ports.src = fl6->fl6_sport;
1368         keys->ports.dst = fl6->fl6_dport;
1369         keys->keyid.keyid = fl6->fl6_gre_key;
1370         keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
1371         keys->basic.ip_proto = fl6->flowi6_proto;
1372
1373         return flow_hash_from_keys(keys);
1374 }
1375 EXPORT_SYMBOL(__get_hash_from_flowi6);
1376
1377 static const struct flow_dissector_key flow_keys_dissector_keys[] = {
1378         {
1379                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1380                 .offset = offsetof(struct flow_keys, control),
1381         },
1382         {
1383                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1384                 .offset = offsetof(struct flow_keys, basic),
1385         },
1386         {
1387                 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1388                 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1389         },
1390         {
1391                 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1392                 .offset = offsetof(struct flow_keys, addrs.v6addrs),
1393         },
1394         {
1395                 .key_id = FLOW_DISSECTOR_KEY_TIPC,
1396                 .offset = offsetof(struct flow_keys, addrs.tipckey),
1397         },
1398         {
1399                 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1400                 .offset = offsetof(struct flow_keys, ports),
1401         },
1402         {
1403                 .key_id = FLOW_DISSECTOR_KEY_VLAN,
1404                 .offset = offsetof(struct flow_keys, vlan),
1405         },
1406         {
1407                 .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
1408                 .offset = offsetof(struct flow_keys, tags),
1409         },
1410         {
1411                 .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
1412                 .offset = offsetof(struct flow_keys, keyid),
1413         },
1414 };
1415
1416 static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
1417         {
1418                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1419                 .offset = offsetof(struct flow_keys, control),
1420         },
1421         {
1422                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1423                 .offset = offsetof(struct flow_keys, basic),
1424         },
1425         {
1426                 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1427                 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1428         },
1429         {
1430                 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1431                 .offset = offsetof(struct flow_keys, addrs.v6addrs),
1432         },
1433         {
1434                 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1435                 .offset = offsetof(struct flow_keys, ports),
1436         },
1437 };
1438
1439 static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
1440         {
1441                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1442                 .offset = offsetof(struct flow_keys, control),
1443         },
1444         {
1445                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1446                 .offset = offsetof(struct flow_keys, basic),
1447         },
1448 };
1449
1450 struct flow_dissector flow_keys_dissector __read_mostly;
1451 EXPORT_SYMBOL(flow_keys_dissector);
1452
1453 struct flow_dissector flow_keys_basic_dissector __read_mostly;
1454 EXPORT_SYMBOL(flow_keys_basic_dissector);
1455
1456 static int __init init_default_flow_dissectors(void)
1457 {
1458         skb_flow_dissector_init(&flow_keys_dissector,
1459                                 flow_keys_dissector_keys,
1460                                 ARRAY_SIZE(flow_keys_dissector_keys));
1461         skb_flow_dissector_init(&flow_keys_dissector_symmetric,
1462                                 flow_keys_dissector_symmetric_keys,
1463                                 ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
1464         skb_flow_dissector_init(&flow_keys_basic_dissector,
1465                                 flow_keys_basic_dissector_keys,
1466                                 ARRAY_SIZE(flow_keys_basic_dissector_keys));
1467         return 0;
1468 }
1469
1470 core_initcall(init_default_flow_dissectors);