/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
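
/* TCP_FLAGS_BE16() reads the 16-bit word of the TCP header that holds the
 * data offset, reserved bits and flags, then masks off the top four
 * (data offset) bits, leaving the twelve flag bits in network byte order.
 */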
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
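
/* Update the statistics for @flow when @skb has hit it.  Statistics are
 * kept per CPU where possible: each CPU first looks for its own stats
 * entry; if none has been allocated it falls back to the pre-allocated
 * CPU 0 entry, and only allocates a CPU-local entry once it observes
 * another writer contending for the shared one.
 */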
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct flow_stats *stats;
	int node = numa_node_id();
	int cpu = smp_processor_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[cpu]);

	/* Check if already have CPU-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
			flow->stats_last_writer = cpu;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current CPU is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != cpu)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If CPU-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != -1) &&
			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
				/* Try to allocate CPU-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[cpu],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = cpu;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int cpu;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu;

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
		struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}
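
/* check_header() ensures that at least @len bytes from the start of the
 * packet are present in the linear data area.  Returns -EINVAL if the
 * packet is too short and -ENOMEM if the pull fails.
 */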
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
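
/* parse_ipv6hdr() fills in the IPv6 portions of @key, skips any extension
 * headers, and sets the transport header to the start of the L4 payload.
 * Returns the total length of the IPv6 header including extensions, or a
 * negative errno on failure.
 */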
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err, payload_ofs;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_skip_exthdr() as it
	 * always sets frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(payload_ofs < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

/**
 * Parse vlan tag from vlan header.
 * Returns ERROR on memory error.
 * Returns 0 if it encounters a non-vlan or incomplete packet.
 * Returns 1 after successfully parsing vlan tag.
 */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
{
	struct vlan_head *vh = (struct vlan_head *)skb->data;

	if (likely(!eth_type_vlan(vh->tpid)))
		return 0;

	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
				 sizeof(__be16))))
		return -ENOMEM;

	vh = (struct vlan_head *)skb->data;
	key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
	key_vh->tpid = vh->tpid;

	__skb_pull(skb, sizeof(struct vlan_head));
	return 1;
}

static void clear_vlan(struct sw_flow_key *key)
{
	key->eth.vlan.tci = 0;
	key->eth.vlan.tpid = 0;
	key->eth.cvlan.tci = 0;
	key->eth.cvlan.tpid = 0;
}
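
/* parse_vlan() extracts up to two VLAN tags: the outer tag comes either
 * from the skb's acceleration fields or from the packet itself; the inner
 * (customer) tag of a double-tagged packet is always parsed in-band.
 */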
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	if (skb_vlan_tag_present(skb)) {
		key->eth.vlan.tci = htons(skb->vlan_tci);
		key->eth.vlan.tpid = skb->vlan_proto;
	} else {
		/* Parse outer vlan tag in the non-accelerated case. */
		res = parse_vlan_tag(skb, &key->eth.vlan);
		if (res <= 0)
			return res;
	}

	/* Parse inner vlan tag. */
	res = parse_vlan_tag(skb, &key->eth.cvlan);
	if (res <= 0)
		return res;

	return 0;
}
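
/* parse_ethertype() returns the effective Ethertype of the frame: the raw
 * type field for Ethernet II, the encapsulated type for 802.2 LLC/SNAP,
 * ETH_P_802_2 for other LLC frames, and htons(0) if a pull fails.
 */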
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
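
/* parse_icmpv6() fills the transport portion of @key from an ICMPv6
 * header.  For neighbour discovery messages it also records the target
 * address and any source/target link-layer address options.
 */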
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
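
		/* Each ND option is a (type, len) header followed by data,
		 * with nd_opt_len counted in units of 8 octets. */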
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header fields as follows:
 *
 *    - skb->mac_header: the L2 header.
 *
 *    - skb->network_header: just past the L2 header, or just past the
 *      VLAN header, to the first byte of the L2 payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 *
 *    - skb->protocol: the type of the data starting at skb->network_header.
 *      Equals to key->eth.type.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. */
	clear_vlan(key);
	if (key->mac_proto == MAC_PROTO_NONE) {
		if (unlikely(eth_type_vlan(skb->protocol)))
			return -EINVAL;

		skb_reset_network_header(skb);
	} else {
		eth = eth_hdr(skb);
		ether_addr_copy(key->eth.src, eth->h_source);
		ether_addr_copy(key->eth.dst, eth->h_dest);

		__skb_pull(skb, 2 * ETH_ALEN);
		/* We are going to push all headers that we pull, so no need to
		 * update skb->csum here.
		 */

		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

		skb->protocol = parse_ethertype(skb);
		if (unlikely(skb->protocol == htons(0)))
			return -ENOMEM;

		skb_reset_network_header(skb);
		__skb_push(skb, skb->data - skb_mac_header(skb));
	}
	skb_reset_mac_len(skb);
	key->eth.type = skb->protocol;

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		skb_set_inner_network_header(skb, skb->mac_len);
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_inner_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				/* fall-through */
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}
	return 0;
}

int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract(skb, key);
}
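
/* Determine the L2 type from the input device: ARPHRD_NONE devices (e.g.
 * lightweight tunnel ports) deliver L3 packets unless the protocol marks
 * an Ethernet frame (ETH_P_TEB); any other device type is rejected.
 */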
static int key_extract_mac_proto(struct sk_buff *skb)
{
	switch (skb->dev->type) {
	case ARPHRD_ETHER:
		return MAC_PROTO_ETHERNET;
	case ARPHRD_NONE:
		if (skb->protocol == htons(ETH_P_TEB))
			return MAC_PROTO_ETHERNET;
		return MAC_PROTO_NONE;
	default:
		return -EINVAL;
	}
}
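
/* Receive-path entry point: fill in the metadata fields of @key (tunnel
 * info, input port, skb mark, conntrack state), then extract the packet
 * headers via key_extract().
 */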
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	ovs_ct_fill_key(skb, key);
	key->ovs_flow_hash = 0;
	res = key_extract_mac_proto(skb);
	if (res < 0)
		return res;
	key->mac_proto = res;
	key->recirc_id = 0;

	return key_extract(skb, key);
}
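
/* Like ovs_flow_key_extract(), but for packets injected from userspace
 * (OVS_PACKET_CMD_EXECUTE): the metadata portion of @key comes from
 * netlink attributes instead of the skb and input vport.
 */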
int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	int err;

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, attr, key, log);
	if (err)
		return err;

	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
		/* key_extract assumes that skb->protocol is set-up for
		 * layer 3 packets which is the case for other callers,
		 * in particular packets received from the network stack.
		 * Here the correct value can be set from the metadata
		 * extracted above.
		 */
		skb->protocol = key->eth.type;
	} else {
		struct ethhdr *eth;

		skb_reset_mac_header(skb);
		eth = eth_hdr(skb);

		/* Normally, setting the skb 'protocol' field would be
		 * handled by a call to eth_type_trans(), but it assumes
		 * there's a sending device, which we may not have.
		 */
		if (eth_proto_is_802_3(eth->h_proto))
			skb->protocol = eth->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);
	}

	return key_extract(skb, key);
}