1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef _XDP_SAMPLE_BPF_H
3 #define _XDP_SAMPLE_BPF_H
6 #include <bpf/bpf_tracing.h>
7 #include <bpf/bpf_core_read.h>
8 #include <bpf/bpf_helpers.h>
10 #include "net_shared.h"
11 #include "xdp_sample_shared.h"
20 __uint(type, BPF_MAP_TYPE_ARRAY);
21 __uint(map_flags, BPF_F_MMAPABLE);
22 __type(key, unsigned int);
23 __type(value, struct datarec);
/* Receive counter map; defined in the BPF object file that includes this
 * header. One struct datarec entry per slot — presumably one per CPU;
 * verify against the defining .bpf.c.
 */
extern array_map rx_cnt;
/* Number of possible CPUs. const volatile: placed in .rodata so the
 * loader can patch it before program load.
 */
extern const volatile int nr_cpus;
/* Outcome classification for XDP redirect statistics. */
enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};
/* Fallback so this header stands alone if bpf_helpers.h did not define it. */
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

/* Swap the destination and source MAC addresses of the Ethernet header at
 * @data, treating it as six 16-bit words (dst MAC = words 0-2, src MAC =
 * words 3-5). Assumes the caller has already bounds-checked at least 12
 * bytes of packet data.
 */
static __always_inline void swap_src_dst_mac(void *data)
{
	unsigned short *p = data;
	unsigned short dst[3];

	/* Save current destination MAC. */
	dst[0] = p[0];
	dst[1] = p[1];
	dst[2] = p[2];
	/* Source becomes destination. */
	p[0] = p[3];
	p[1] = p[4];
	p[2] = p[5];
	/* Saved destination becomes source. */
	p[3] = dst[0];
	p[4] = dst[1];
	p[5] = dst[2];
}
/* Note: including linux/compiler.h or linux/kernel.h for the macros below
 * conflicts with vmlinux.h include in BPF files, so we define them here.
 *
 * Following functions are taken from kernel sources and
 * break aliasing rules in their original form.
 *
 * While kernel is compiled with -fno-strict-aliasing,
 * perf uses -Wstrict-aliasing=3 which makes build fail.
 *
 * Using extra __may_alias__ type to allow aliasing.
 */
/* __may_alias__ typedefs: lets the sized accessors below read/write memory
 * of any effective type without violating C strict-aliasing rules.
 */
typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
69 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
72 case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
73 case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
74 case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
75 case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
77 asm volatile ("" : : : "memory");
78 __builtin_memcpy((void *)res, (const void *)p, size);
79 asm volatile ("" : : : "memory");
83 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
86 case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
87 case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
88 case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
89 case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
91 asm volatile ("" : : : "memory");
92 __builtin_memcpy((void *)p, (const void *)res, size);
93 asm volatile ("" : : : "memory");
/* Read @x exactly once, without tearing. Pairs with WRITE_ONCE(). */
#define READ_ONCE(x)					\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__c = { 0 } };			\
	__read_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
/* Write @val to @x exactly once, without tearing; evaluates to the value
 * written. Pairs with READ_ONCE().
 */
#define WRITE_ONCE(x, val)				\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
/* Add a value using relaxed read and relaxed write. Less expensive than
 * fetch_add when there is no write concurrency.
 */
#define NO_TEAR_ADD(x, val) WRITE_ONCE((x), READ_ONCE(x) + (val))
#define NO_TEAR_INC(x) NO_TEAR_ADD((x), 1)
/* Element count of a true array. Do NOT use on a pointer or a decayed
 * array parameter — sizeof would measure the pointer, not the array.
 */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))