In order to fix the data race found by KCSAN, we
can use the new u64_stats_t type and its accessors instead
of plain u64 fields. This still generates optimal code
for both 32-bit and 64-bit platforms.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
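For reference, the hunks below fit together as follows. This is a minimal sketch assembling the converted struct, the per-CPU writer, and the retry-loop reader in one place; the demo_* names are illustrative and not taken from the patch.

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	/* Same shape as pcpu_lstats after the conversion. */
	struct demo_lstats {
		u64_stats_t		packets;
		u64_stats_t		bytes;
		struct u64_stats_sync	syncp;
	};

	/* Writer side: runs on the local CPU, e.g. from an xmit path. */
	static void demo_lstats_add(struct demo_lstats __percpu *lstats, unsigned int len)
	{
		struct demo_lstats *s = this_cpu_ptr(lstats);

		u64_stats_update_begin(&s->syncp);
		u64_stats_add(&s->bytes, len);	/* a plain "s->bytes += len" is what KCSAN flags */
		u64_stats_inc(&s->packets);
		u64_stats_update_end(&s->syncp);
	}

	/* Reader side: may run on any CPU, concurrently with writers. */
	static void demo_lstats_read_cpu(struct demo_lstats __percpu *lstats, int cpu,
					 u64 *packets, u64 *bytes)
	{
		const struct demo_lstats *s = per_cpu_ptr(lstats, cpu);
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*packets = u64_stats_read(&s->packets);
			*bytes = u64_stats_read(&s->bytes);
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}

Roughly speaking, on 64-bit kernels struct u64_stats_sync is empty and the fetch/retry pair compiles away, so the accessors boil down to single loads and stores; on 32-bit kernels the seqcount inside syncp prevents torn 64-bit reads. That is the "optimal code on both" property mentioned above.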
 		lb_stats = per_cpu_ptr(dev->lstats, i);
 		do {
 			start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
-			tpackets = lb_stats->packets;
-			tbytes = lb_stats->bytes;
+			tpackets = u64_stats_read(&lb_stats->packets);
+			tbytes = u64_stats_read(&lb_stats->bytes);
 		} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
 		*bytes += tbytes;
 		*packets += tpackets;
 } __aligned(4 * sizeof(u64));
 
 struct pcpu_lstats {
-	u64 packets;
-	u64 bytes;
+	u64_stats_t packets;
+	u64_stats_t bytes;
 	struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
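Not part of this diff, but for context: on 32-bit kernels the seqcount inside syncp must be initialized before the first update. A sketch of how such a per-CPU structure is typically allocated and initialized (the function name is illustrative):

	#include <linux/netdevice.h>
	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	static int demo_init_lstats(struct net_device *dev)
	{
		int cpu;

		dev->lstats = alloc_percpu(struct pcpu_lstats);
		if (!dev->lstats)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct pcpu_lstats *s = per_cpu_ptr(dev->lstats, cpu);

			u64_stats_init(&s->syncp);	/* seqcount init; compiles away on 64-bit */
		}
		return 0;
	}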
 	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
 
 	u64_stats_update_begin(&lstats->syncp);
-	lstats->bytes += len;
-	lstats->packets++;
+	u64_stats_add(&lstats->bytes, len);
+	u64_stats_inc(&lstats->packets);
 	u64_stats_update_end(&lstats->syncp);
 }
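And the read side in full driver context: a sketch of an ndo_get_stats64 hook that sums the per-CPU counters with the loop from the first hunk and reports them as device stats (the function name is illustrative; loopback reports the same totals for RX and TX):

	#include <linux/netdevice.h>
	#include <linux/u64_stats_sync.h>

	static void demo_get_stats64(struct net_device *dev,
				     struct rtnl_link_stats64 *stats)
	{
		u64 packets = 0, bytes = 0;
		int i;

		for_each_possible_cpu(i) {
			const struct pcpu_lstats *lb_stats = per_cpu_ptr(dev->lstats, i);
			u64 tpackets, tbytes;
			unsigned int start;

			do {
				start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
				tpackets = u64_stats_read(&lb_stats->packets);
				tbytes = u64_stats_read(&lb_stats->bytes);
			} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));

			packets += tpackets;
			bytes += tbytes;
		}

		/* Every packet "sent" over loopback is also received. */
		stats->rx_packets = packets;
		stats->tx_packets = packets;
		stats->rx_bytes = bytes;
		stats->tx_bytes = bytes;
	}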