__be64 seg_addr; /* Base address (see note) of this segment */
} __packed;
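+/* Optional metadata descriptor. Carries a path hash (the skb's L4 flow hash)
+ * and a path state value for the packet it accompanies.
+ */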
+struct gve_tx_mtd_desc {
+ u8 type_flags; /* type is lower 4 bits, subtype upper */
+ u8 path_state; /* state is lower 4 bits, hash type upper */
+ __be16 reserved0;
+ __be32 path_hash;
+ __be64 reserved1;
+} __packed;
+
struct gve_tx_seg_desc {
u8 type_flags; /* type is lower 4 bits, flags upper */
u8 l3_offset; /* TSO: 2 byte units to start of IPH */
#define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */
#define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */
#define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */
+#define GVE_TXD_MTD (0x3 << 4) /* Metadata */
/* GVE Transmit Descriptor Flags for Std Pkts */
#define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */
/* GVE Transmit Descriptor Flags for TSO Segs */
#define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */
+/* GVE Transmit Descriptor Options for MTD Segs */
+#define GVE_MTD_SUBTYPE_PATH 0
+
+#define GVE_MTD_PATH_STATE_DEFAULT 0
+#define GVE_MTD_PATH_STATE_TIMEOUT 1
+#define GVE_MTD_PATH_STATE_CONGESTION 2
+#define GVE_MTD_PATH_STATE_RETRANSMIT 3
+
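+/* Hash type lives in the upper nibble of path_state, path state in the lower */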
+#define GVE_MTD_PATH_HASH_NONE (0x0 << 4)
+#define GVE_MTD_PATH_HASH_L4 (0x1 << 4)
+
/* GVE Receive Packet Descriptor */
/* The start of an ethernet packet comes 2 bytes into the rx buffer.
* gVNIC adds this padding so that both the DMA and the L3/4 protocol header
return bytes;
}
-/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag,
- * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor,
- * and +1 if the payload wraps to the beginning of the FIFO.
+/* The most descriptors we could need is MAX_SKB_FRAGS + 4:
+ * 1 for each skb frag
+ * 1 for the skb linear portion
+ * 1 for when tcp hdr needs to be in separate descriptor
+ * 1 if the payload wraps to the beginning of the FIFO
+ * 1 for metadata descriptor
*/
-#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3)
+#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4)
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}
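+/* Fill an optional metadata descriptor with the skb's L4 flow hash. The
+ * metadata descriptor must be the same size as the packet descriptor, as
+ * both are members of union gve_tx_desc (asserted below).
+ */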
+static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
+ struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));
+
+ mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
+ mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
+ GVE_MTD_PATH_HASH_L4;
+ mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
+ mtd_desc->mtd.reserved0 = 0;
+ mtd_desc->mtd.reserved1 = 0;
+}
+
static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
struct sk_buff *skb, bool is_gso,
u16 len, u64 addr)
int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
union gve_tx_desc *pkt_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
int payload_iov = 2;
&info->iov[payload_iov]);
gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen,
+ 1 + mtd_desc_nr + payload_nfrags, hlen,
info->iov[hdr_nfrags - 1].iov_offset);
skb_copy_bits(skb, 0,
info->iov[hdr_nfrags - 1].iov_len);
copy_offset = hlen;
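+ /* The optional metadata descriptor occupies the slot right after the
+ * packet descriptor; the payload segment descriptors below are shifted
+ * by mtd_desc_nr to account for it.
+ */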
+ if (mtd_desc_nr) {
+ next_idx = (tx->req + 1) & tx->mask;
+ gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
+ }
+
for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
- next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
+ next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
seg_desc = &tx->desc[next_idx];
gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
copy_offset += info->iov[i].iov_len;
}
- return 1 + payload_nfrags;
+ return 1 + mtd_desc_nr + payload_nfrags;
}
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
- int hlen, payload_nfrags, l4_hdr_offset;
- union gve_tx_desc *pkt_desc, *seg_desc;
+ int hlen, num_descriptors, l4_hdr_offset;
+ union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
u64 addr;
dma_unmap_len_set(info, len, len);
dma_unmap_addr_set(info, dma, addr);
- payload_nfrags = shinfo->nr_frags;
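+ /* Compute the final descriptor count before filling the packet descriptor,
+ * which takes it as an argument: the packet descriptor itself, one per frag,
+ * plus optional descriptors for the rest of the linear portion and for
+ * metadata.
+ */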
+ num_descriptors = 1 + shinfo->nr_frags;
+ if (hlen < len)
+ num_descriptors++;
+ if (mtd_desc_nr)
+ num_descriptors++;
+
+ gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+ num_descriptors, hlen, addr);
+
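+ /* The optional metadata descriptor sits right after the packet descriptor
+ * and has no DMA mapping of its own.
+ */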
+ if (mtd_desc_nr) {
+ idx = (idx + 1) & tx->mask;
+ mtd_desc = &tx->desc[idx];
+ gve_tx_fill_mtd_desc(mtd_desc, skb);
+ }
+
if (hlen < len) {
/* For gso the rest of the linear portion of the skb needs to
* be in its own descriptor.
*/
- payload_nfrags++;
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
-
len -= hlen;
addr += hlen;
- idx = (tx->req + 1) & tx->mask;
+ idx = (idx + 1) & tx->mask;
seg_desc = &tx->desc[idx];
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
- } else {
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
}
for (i = 0; i < shinfo->nr_frags; i++) {
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
}
- return 1 + payload_nfrags;
+ return num_descriptors;
unmap_drop:
- i += (payload_nfrags == shinfo->nr_frags ? 1 : 2);
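+ /* Also step back over the non-frag descriptors (packet descriptor,
+ * optional linear-remainder and metadata descriptors), not just the
+ * frags mapped so far.
+ */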
+ i += num_descriptors - shinfo->nr_frags;
while (i--) {
+ /* Skip metadata descriptor, if set */
+ if (i == 1 && mtd_desc_nr == 1)
+ continue;
idx--;
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}