 	/* Notify the layer above us */
 	if (likely(skb)) {
-		struct hv_netvsc_packet *nvsc_packet
+		const struct hv_netvsc_packet *packet
 			= (struct hv_netvsc_packet *)skb->cb;
-		u32 send_index = nvsc_packet->send_buf_index;
+		u32 send_index = packet->send_buf_index;
+		struct netvsc_stats *tx_stats;
 
 		if (send_index != NETVSC_INVALID_INDEX)
 			netvsc_free_send_slot(net_device, send_index);
-		q_idx = nvsc_packet->q_idx;
+		q_idx = packet->q_idx;
 		channel = incoming_channel;
+
+		tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
+
+		u64_stats_update_begin(&tx_stats->syncp);
+		tx_stats->packets += packet->total_packets;
+		tx_stats->bytes += packet->total_bytes;
+		u64_stats_update_end(&tx_stats->syncp);
+
 		dev_consume_skb_any(skb);
 	}
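/*
 * Illustrative sketch, not part of the patch: the per-CPU tx counters
 * updated in the completion path above are normally folded into
 * device-wide totals on the read side (e.g. from a .ndo_get_stats64
 * handler).  The helper name below is hypothetical, and the
 * net_device_context/tx_stats types are assumed from the surrounding
 * driver along with its usual headers (linux/netdevice.h,
 * linux/u64_stats_sync.h); only the u64_stats_fetch_begin_irq()/
 * u64_stats_fetch_retry_irq() loop is the standard kernel idiom for
 * reading u64_stats_sync-protected counters.
 */
static void example_fold_tx_stats(struct net_device *ndev,
				  struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct netvsc_stats *stats =
			per_cpu_ptr(ndc->tx_stats, cpu);
		unsigned int start;
		u64 packets, bytes;

		/* Retry if a writer updated the counters while we read. */
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_packets += packets;
		t->tx_bytes += bytes;
	}
}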
 			packet->total_data_buflen += msd_len;
 		}
 
+		if (msdp->pkt) {
+			packet->total_packets += msdp->pkt->total_packets;
+			packet->total_bytes += msdp->pkt->total_bytes;
+		}
+
 		if (msdp->skb)
 			dev_consume_skb_any(msdp->skb);
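/*
 * Standalone model (hypothetical, userspace; not driver code) of the
 * accounting invariant behind the hunk above: each skb starts out with
 * total_packets = 1 and total_bytes = skb->len, and when it is sent
 * together with a pending multi-send descriptor the pending totals are
 * folded in, so the single completion that follows credits every
 * coalesced frame exactly once.
 */
#include <stdio.h>

struct pkt {
	unsigned long total_packets;
	unsigned long total_bytes;
};

/* Fold a pending descriptor's totals into the packet about to be sent. */
static void fold_pending(struct pkt *sending, const struct pkt *pending)
{
	if (pending) {
		sending->total_packets += pending->total_packets;
		sending->total_bytes += pending->total_bytes;
	}
}

int main(void)
{
	struct pkt pending = { .total_packets = 3, .total_bytes = 4500 };
	struct pkt sending = { .total_packets = 1, .total_bytes = 1500 };

	fold_pending(&sending, &pending);

	/* The completion handler would now add 4 packets / 6000 bytes. */
	printf("completion credits %lu packets, %lu bytes\n",
	       sending.total_packets, sending.total_bytes);
	return 0;
}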
 	u32 rndis_msg_size;
 	struct rndis_per_packet_info *ppi;
 	u32 hash;
-	u32 skb_length;
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 	struct hv_page_buffer *pb = page_buf;
 	 * more pages we try linearizing it.
 	 */
-	skb_length = skb->len;
 	num_data_pgs = netvsc_get_slots(skb) + 2;
 	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
 	packet->q_idx = skb_get_queue_mapping(skb);
 	packet->total_data_buflen = skb->len;
+	packet->total_bytes = skb->len;
+	packet->total_packets = 1;
 	rndis_msg = (struct rndis_message *)skb->head;
 	skb_tx_timestamp(skb);
 	ret = netvsc_send(net_device_ctx->device_ctx, packet,
 			  rndis_msg, &pb, skb);
-	if (likely(ret == 0)) {
-		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
-
-		u64_stats_update_begin(&tx_stats->syncp);
-		tx_stats->packets++;
-		tx_stats->bytes += skb_length;
-		u64_stats_update_end(&tx_stats->syncp);
+	if (likely(ret == 0))
 		return NETDEV_TX_OK;
-	}
 	if (ret == -EAGAIN) {
 		++net_device_ctx->eth_stats.tx_busy;