Add support for configuring the max SDU (maximum Service Data Unit) for
each Tx queue through the tc-taprio offload. If a max SDU is not
specified for a queue, keep the default and enforce no per-queue limit.
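With full offload (flags 0x2), the per-queue limit can be requested from
user space through tc-taprio's per-TC max-sdu list. As an illustration
only (interface name, schedule, queue mapping and sizes below are
placeholders):

  tc qdisc replace dev eth0 parent root handle 100 taprio \
      num_tc 4 \
      map 0 1 2 3 3 3 3 3 3 3 3 3 3 3 3 3 \
      queues 1@0 1@1 1@2 1@3 \
      base-time 0 \
      sched-entry S 0F 1000000 \
      max-sdu 0 0 0 200 \
      flags 0x2

A max-sdu entry of 0 leaves that traffic class at its default, i.e. no
per-queue limit is enforced for it.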
Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
Tested-by: Naama Meir <naamax.meir@linux.intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
	u32 start_time;
	u32 end_time;
+	u32 max_sdu;
	/* CBS parameters */
	bool cbs_enable;	/* indicates if CBS is enabled */
	u64 o2bspc;
	u64 b2ospc;
	u64 b2ogprc;
+	u64 txdrop;
};

struct net_device *igc_get_hw_dev(struct igc_hw *hw);
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
+	struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
	bool first_flag = false, insert_empty = false;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	first->bytecount = skb->len;
	first->gso_segs = 1;
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+	if (tx_ring->max_sdu > 0) {
+		u32 max_sdu = 0;
+
+		max_sdu = tx_ring->max_sdu +
+			  (skb_vlan_tagged(first->skb) ? VLAN_HLEN : 0);
+		if (first->bytecount > max_sdu) {
+			adapter->stats.txdrop++;
+			goto out_drop;
+		}
+	}
+
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;
-	/* Tx Dropped needs to be maintained elsewhere */
+	/* Tx Dropped */
+	net_stats->tx_dropped = adapter->stats.txdrop;
	/* Management Stats */
	adapter->stats.mgptc += rd32(IGC_MGTPTC);
		ring->start_time = 0;
		ring->end_time = NSEC_PER_SEC;
+		ring->max_sdu = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igc_ring *ring = adapter->tx_ring[i];
+		struct net_device *dev = adapter->netdev;
+
+		if (qopt->max_sdu[i])
+			ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len;
+		else
+			ring->max_sdu = 0;
+	}
+
	caps->broken_mqprio = true;

-	if (hw->mac.type == igc_i225)
+	if (hw->mac.type == igc_i225) {
+		caps->supports_queue_max_sdu = true;
		caps->gate_mask_per_txq = true;
+	}