}
}
-static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
- unsigned int type, __be32 *hash)
+static void
+nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
{
if (!(netdev->features & NETIF_F_RXHASH))
return;
switch (type) {
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
- skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
+ meta->hash_type = PKT_HASH_TYPE_L3;
break;
default:
- skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
+ meta->hash_type = PKT_HASH_TYPE_L4;
break;
}
+
+ meta->hash = get_unaligned_be32(hash);
}
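For reference: struct nfp_meta_parsed itself is outside this excerpt. Judging only from the fields the new code assigns, it is presumably a small plain struct along these lines; a sketch, not the patch's own definition:

	struct nfp_meta_parsed {
		u32 hash_type;	/* PKT_HASH_TYPE_* value for skb_set_hash() */
		u32 hash;	/* RSS hash lifted out of the prepend */
		u32 mark;	/* value destined for skb->mark */
	};

Parsing into this struct rather than into an skb is the point of the change: the metadata can be decoded straight from the buffer before any skb exists, and before XDP gets a chance to overwrite the prepend.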
static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
+nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, struct nfp_net_rx_desc *rxd)
{
struct nfp_net_rx_hash *rx_hash = data;
if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
return;
- nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+ nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
&rx_hash->hash);
}
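The fixed, non-chained format handled here is just two big-endian words at the front of the buffer; struct nfp_net_rx_hash is presumably laid out as below, inferred from the accesses above:

	struct nfp_net_rx_hash {
		__be32 hash_type;	/* one of the NFP_NET_RSS_* types */
		__be32 hash;		/* the hash value itself */
	};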
static void *
-nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
+nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, int meta_len)
{
u32 meta_info;
switch (meta_info & NFP_NET_META_FIELD_MASK) {
case NFP_NET_META_HASH:
meta_info >>= NFP_NET_META_FIELD_SIZE;
- nfp_net_set_hash(netdev, skb,
+ nfp_net_set_hash(netdev, meta,
meta_info & NFP_NET_META_FIELD_MASK,
(__be32 *)data);
data += 4;
break;
case NFP_NET_META_MARK:
- skb->mark = get_unaligned_be32(data);
+ meta->mark = get_unaligned_be32(data);
data += 4;
break;
default:
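In the chained format, the first 32-bit word of the prepend packs field tags into nibbles which the (elided) surrounding loop shifts through, each tag followed by one 32-bit payload word. A worked example, assuming NFP_NET_META_FIELD_SIZE is 4 and purely illustrative tag values (NFP_NET_META_MARK == 2, NFP_NET_META_HASH == 1, with the HASH tag's next nibble carrying the RSS type, as the case above implies):

	/* Hypothetical prepend carrying a mark and an RSS hash:
	 *
	 *   word 0: 0x00000412   tags, low nibble first: MARK (2), HASH (1),
	 *                        then the RSS type nibble (4) that the HASH
	 *                        case consumes via the extra shift-and-mask
	 *   word 1: mark value   payload for the NFP_NET_META_MARK case
	 *   word 2: hash value   payload for the NFP_NET_META_HASH case
	 *
	 * Each case advances data by 4 bytes; when the tag word is exhausted,
	 * nfp_net_parse_meta() returns the final data pointer for the caller
	 * to validate against the start of the packet proper.
	 */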
while (pkts_polled < budget) {
unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
- u8 meta_prepend[NFP_NET_MAX_PREPEND];
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
dma_addr_t new_dma_addr;
void *new_frag;
- u8 *meta;
idx = rx_ring->rd_p & (rx_ring->cnt - 1);
*/
dma_rmb();
+ memset(&meta, 0, sizeof(meta));
+
rx_ring->rd_p++;
pkts_polled++;
r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);
- /* Pointer to start of metadata */
- meta = rxbuf->frag + meta_off;
-
if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
(dp->rx_offset && meta_len > dp->rx_offset))) {
nn_dp_warn(dp, "oversized RX packet metadata %u\n",
nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
data_len);
+ if (!dp->chained_metadata_format) {
+ nfp_net_set_hash_desc(dp->netdev, &meta,
+ rxbuf->frag + meta_off, rxd);
+ } else if (meta_len) {
+ void *end;
+
+ end = nfp_net_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ meta_len);
+ if (unlikely(end != rxbuf->frag + pkt_off)) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
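Parsing now happens straight out of rxbuf->frag, before the XDP block below runs; that is what makes the meta_prepend staging copy removed further down unnecessary. The end-pointer check above relies on the prepend sitting immediately before the packet, i.e. (presumably, per the elided offset setup) pkt_off == meta_off + meta_len:

	/* Assumed receive-buffer layout behind the check above:
	 *
	 *   rxbuf->frag + meta_off   metadata prepend, meta_len bytes
	 *   rxbuf->frag + pkt_off    packet data, == meta_off + meta_len
	 *
	 * so a well-formed chained walk must return exactly
	 * rxbuf->frag + pkt_off.
	 */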
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
dp->bpf_offload_xdp)) {
unsigned int dma_off;
hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
- /* Move prepend out of the way */
- if (xdp_prog->xdp_adjust_head) {
- memcpy(meta_prepend, meta, meta_len);
- meta = meta_prepend;
- }
-
act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
&pkt_off, &pkt_len);
switch (act) {
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
- if (!dp->chained_metadata_format) {
- nfp_net_set_hash_desc(dp->netdev, skb, meta, rxd);
- } else if (meta_len) {
- void *end;
-
- end = nfp_net_parse_meta(dp->netdev, skb, meta,
- meta_len);
- if (unlikely(end != meta + meta_len)) {
- nn_dp_warn(dp, "invalid RX packet metadata\n");
- nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
- continue;
- }
- }
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, dp->netdev);
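One note on the unconditional skb_set_hash() above: the memset() of meta at the top of the loop leaves hash_type at 0, which is PKT_HASH_TYPE_NONE, so packets that carried no RSS metadata record hash 0 with type NONE and need no separate branch:

	/* Zeroed meta is benign here: PKT_HASH_TYPE_NONE == 0 in
	 * enum pkt_hash_types, and the stack treats a zero, non-L4,
	 * non-sw hash as "no valid hash" and will recompute on demand.
	 */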