return hns_mac_set_mtu(mac_cb, new_mtu);
}
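+
+/* pass the TSO enable/disable flag down to the PPE for this handle */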
+static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
+{
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ hns_ppe_set_tso_enable(ppe_cb, enable);
+}
+
static int hns_ae_start(struct hnae_handle *handle)
{
int ret;
.set_mc_addr = hns_ae_set_multicast_one,
.set_mtu = hns_ae_set_mtu,
.update_stats = hns_ae_update_stats,
+ .set_tso_stats = hns_ae_set_tso_stats,
.get_stats = hns_ae_get_stats,
.get_strings = hns_ae_get_strings,
.get_sset_count = hns_ae_get_sset_count,
return 0;
}
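+
+/* count the BDs the skb needs; if the count exceeds what the hardware
+ * can chain for a single packet, linearize the skb with skb_copy() so
+ * the send path can re-split it. Returns -EBUSY when the ring has too
+ * little space and -ENOMEM when the copy fails.
+ */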
+static int hns_nic_maybe_stop_tso(
+ struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
+{
+ int i;
+ int size;
+ int buf_num;
+ int frag_num;
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
+ struct skb_frag_struct *frag;
+
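+ /* BDs needed for the linear part of the skb */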
+ size = skb_headlen(skb);
+ buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+
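+ /* plus BDs for each paged fragment */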
+ frag_num = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < frag_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ size = skb_frag_size(frag);
+ buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ }
+
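+ /* more BDs than the hardware can chain for one packet: the skb
+ * has to be linearized, after which skb->len rounded up to
+ * BD_MAX_SEND_SIZE units is the real BD count
+ */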
+ if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+ buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ if (ring_space(ring) < buf_num)
+ return -EBUSY;
+ /* linearize the skb so it can be split manually on transmit */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+ } else if (ring_space(ring) < buf_num) {
+ return -EBUSY;
+ }
+
+ *bnum = buf_num;
+ return 0;
+}
+
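+/* like fill_v2_desc(), but a buffer larger than BD_MAX_SEND_SIZE is
+ * spread across several consecutive BDs
+ */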
+static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ int frag_buf_num;
+ int sizeoflast;
+ int k;
+
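+ /* number of BDs this buffer needs and the length of the last one */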
+ frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ sizeoflast = size % BD_MAX_SEND_SIZE;
+ sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
+
+ /* when the frag size is bigger than the hardware limit, split this frag */
+ for (k = 0; k < frag_buf_num; k++)
+ fill_v2_desc(ring, priv,
+ (k == frag_buf_num - 1) ?
+ sizeoflast : BD_MAX_SEND_SIZE,
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
+ DESC_TYPE_SKB : DESC_TYPE_PAGE,
+ mtu);
+}
+
int hns_nic_net_xmit_hw(struct net_device *ndev,
struct sk_buff *skb,
struct hns_nic_ring_data *ring_data)
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
if (AE_IS_VER1(priv->enet_ver)) {
priv->ops.fill_desc = fill_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
} else {
priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
- priv->ops.fill_desc = fill_v2_desc;
- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
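+ /* enet v2 can offload TCP segmentation: hook up the TSO-aware
+ * descriptor fill and queue-stop ops and tell the hardware
+ */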
+ if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ priv->ops.fill_desc = fill_tso_desc;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+ /* this chip supports at most 7 * 4096 bytes per GSO packet */
+ netif_set_gso_max_size(netdev, 7 * 4096);
+ h->dev->ops->set_tso_stats(h, 1);
+ } else {
+ priv->ops.fill_desc = fill_v2_desc;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ }
}
}
switch (priv->enet_ver) {
case AE_VERSION_2:
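+ /* TSO is only implemented on enet v2 hardware */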
+ ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
- NETIF_F_GRO;
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
break;
default:
break;