return ret;
}
-static bool vxlan_remcsum(struct sk_buff *skb, u32 vxflags, __be32 vni_field)
+static bool vxlan_remcsum(struct vxlanhdr *unparsed,
+			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset, plen;

-	if (skb->remcsum_offload)
-		return true;
+	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
+		goto out;

-	start = vxlan_rco_start(vni_field);
-	offset = start + vxlan_rco_offset(vni_field);
+	start = vxlan_rco_start(unparsed->vx_vni);
+	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	plen = sizeof(struct vxlanhdr) + offset + sizeof(u16);

	if (!pskb_may_pull(skb, plen))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
-
+out:
+	unparsed->vx_flags &= ~VXLAN_HF_RCO;
+	unparsed->vx_vni &= VXLAN_VNI_MASK;
return true;
}
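/* For context: the RCO helpers used above decode a remote-checksum-offload
 * hint that the sender packs into the low byte of the VNI word.  Roughly,
 * per include/net/vxlan.h of this era (a sketch for reference, not part of
 * this patch):
 *
 *	static inline size_t vxlan_rco_start(__be32 vni_field)
 *	{
 *		return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
 *	}
 *
 *	static inline size_t vxlan_rco_offset(__be32 vni_field)
 *	{
 *		return (vni_field & VXLAN_RCO_UDP) ?
 *			offsetof(struct udphdr, check) :
 *			offsetof(struct tcphdr, check);
 *	}
 *
 * "start" is where checksumming of the inner packet begins and "offset" is
 * where the folded checksum is stored, which is why out-of-range values
 * must be caught by the pskb_may_pull() above.
 */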
-static void vxlan_parse_gbp_hdr(struct sk_buff *skb, struct vxlan_metadata *md,
+static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
+				struct vxlan_metadata *md,
				struct metadata_dst *tun_dst)
{
-	struct vxlanhdr_gbp *gbp;
+	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
+
+	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
+		goto out;

-	gbp = (struct vxlanhdr_gbp *)vxlan_hdr(skb);
	md->gbp = ntohs(gbp->policy_id);

	if (tun_dst)
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;
+
+out:
+	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
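/* The cast above reinterprets the copied header as the group-based policy
 * variant.  As a sketch (little-endian bitfield order; the real definition
 * in include/net/vxlan.h carries a big-endian twin behind #ifdefs):
 *
 *	struct vxlanhdr_gbp {
 *		u8	vx_flags;
 *		u8	reserved_flags1:3,
 *			policy_applied:1,
 *			reserved_flags2:2,
 *			dont_learn:1,
 *			reserved_flags3:1;
 *		__be16	policy_id;
 *		__be32	vx_vni;
 *	};
 *
 * VXLAN_GBP_USED_BITS covers the GBP flag plus all of these policy bits,
 * so clearing it marks the whole extension as consumed.
 */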
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct vxlan_sock *vs;
-	__be32 flags, vni_field;
+	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;
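/* VXLAN_HLEN is the outer UDP header plus the 8-byte VXLAN header; only
 * that much is guaranteed to be linear after this pull, which is why
 * vxlan_remcsum() must pskb_may_pull() again before it reaches into the
 * inner packet.
 */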
-	flags = vxlan_hdr(skb)->vx_flags;
-	vni_field = vxlan_hdr(skb)->vx_vni;
-
-	if (flags & VXLAN_HF_VNI) {
-		flags &= ~VXLAN_HF_VNI;
+	unparsed = *vxlan_hdr(skb);
+	if (unparsed.vx_flags & VXLAN_HF_VNI) {
+		unparsed.vx_flags &= ~VXLAN_HF_VNI;
+		unparsed.vx_vni &= ~VXLAN_VNI_MASK;
	} else {
		/* VNI flag always required to be set */
		goto bad_flags;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;
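/* From here on, "unparsed" acts as an accumulator: it begins as a byte
 * copy of the on-wire VXLAN header with the VNI bits already consumed,
 * and each extension parser below clears the bits it handled.  Anything
 * still set once parsing is done belongs to an extension that is unknown
 * or not enabled on this socket.
 */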
-	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
-		if (!vxlan_remcsum(skb, vs->flags, vni_field))
-			goto drop;
-
-		flags &= ~VXLAN_HF_RCO;
-		vni_field &= VXLAN_VNI_MASK;
-	}
-
	if (vxlan_collect_metadata(vs)) {
		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
-					 vxlan_vni(vni_field), sizeof(*md));
+					 vxlan_vni(vxlan_hdr(skb)->vx_vni),
+					 sizeof(*md));

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
	} else {
		memset(md, 0, sizeof(*md));
	}
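/* In collect-metadata mode the outer tunnel key (addresses, ports, VNI)
 * is carried in a metadata_dst for flow-based consumers, and md is
 * repointed at that metadata's option space via ip_tunnel_info_opts(),
 * so vxlan_parse_gbp_hdr() records per-packet GBP state there instead of
 * in the on-stack _md.
 */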
	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
-	if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
-		vxlan_parse_gbp_hdr(skb, md, tun_dst);
-		flags &= ~VXLAN_GBP_USED_BITS;
-	}
+	if (vs->flags & VXLAN_F_REMCSUM_RX)
+		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
+			goto drop;
+	if (vs->flags & VXLAN_F_GBP)
+		vxlan_parse_gbp_hdr(&unparsed, md, tun_dst);
-	if (flags || vni_field & ~VXLAN_VNI_MASK) {
+	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto bad_flags;
	}
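/* Concretely: a packet with the GBP bit set (on-wire flags 0x88000000,
 * i.e. VXLAN_HF_GBP | VXLAN_HF_VNI) arriving on a socket without
 * VXLAN_F_GBP never reaches vxlan_parse_gbp_hdr(), so VXLAN_HF_GBP is
 * still set in unparsed.vx_flags here and the packet takes the bad_flags
 * path instead of having the bit silently ignored.  (Flag values
 * illustrative of this era's definitions.)
 */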
-	vxlan_rcv(vs, skb, md, vxlan_vni(vni_field), tun_dst);
+	vxlan_rcv(vs, skb, md, vxlan_vni(vxlan_hdr(skb)->vx_vni), tun_dst);
return 0;
drop: