1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
/*
 * Stringify each BPF helper: maps BPF_FUNC_<x> (the helper ID) to the
 * literal name "bpf_<x>" for human-readable disassembly output.
 */
10 #define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
/* Helper-ID -> name table, generated from the UAPI helper list. */
11 static const char * const func_id_str[] = {
12 __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
/* Macro is only needed to build the table above. */
14 #undef __BPF_FUNC_STR_FN
/*
 * __func_get_name - resolve a printable callee name for a BPF call insn.
 *
 * Looks up the helper name in func_id_str[] when insn->imm is a valid
 * helper ID; otherwise defers to the caller-supplied cb_call callback,
 * or formats a fallback into @buff (relative target for BPF_PSEUDO_CALL,
 * the literal "kernel-function" for BPF_PSEUDO_KFUNC_CALL).
 * NOTE(review): the final return path is outside this view — presumably
 * returns @buff; confirm against the full file.
 */
16 static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
17 const struct bpf_insn *insn,
18 char *buff, size_t len)
/* Keep the name table in sync with the UAPI helper-ID enum. */
20 BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
/* Direct helper call with a known ID: return the static name. */
23 insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
24 func_id_str[insn->imm])
25 return func_id_str[insn->imm];
/* Let the embedder (e.g. bpftool) resolve the callee if it can. */
27 if (cbs && cbs->cb_call) {
30 res = cbs->cb_call(cbs->private_data, insn);
/* BPF-to-BPF call: target is a signed insn offset relative to pc. */
35 if (insn->src_reg == BPF_PSEUDO_CALL)
36 snprintf(buff, len, "%+d", insn->imm);
/* kfunc call: BTF ID in imm, no name available here. */
37 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
38 snprintf(buff, len, "kernel-function");
/*
 * __func_imm_name - render the 64-bit immediate of an ldimm64 insn.
 *
 * Prefers the caller-supplied cb_imm callback (which may translate the
 * value into something symbolic, e.g. a map name); otherwise formats
 * @full_imm as hex into @buff. Presumably returns @buff on the fallback
 * path — the final return is outside this view.
 */
43 static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
44 const struct bpf_insn *insn,
45 u64 full_imm, char *buff, size_t len)
47 if (cbs && cbs->cb_imm)
48 return cbs->cb_imm(cbs->private_data, insn, full_imm);
50 snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
/*
 * func_id_name - public bounds-checked lookup of a helper name by ID.
 * Fallback return for unknown IDs is outside this view (presumably a
 * placeholder string such as "unknown").
 */
54 const char *func_id_name(int id)
56 if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
57 return func_id_str[id];
/*
 * Instruction-class names, indexed by BPF_CLASS(insn->code).
 * Only the jmp32/alu64 entries are visible here; the remaining six
 * class entries are elided from this view.
 */
62 const char *const bpf_class_string[8] = {
69 [BPF_JMP32] = "jmp32",
70 [BPF_ALU64] = "alu64",
/*
 * ALU op mnemonics, indexed by BPF_OP(insn->code) >> 4 (the op field
 * occupies the high nibble of the opcode byte). Rendered in C-like
 * compound-assignment form, e.g. "r1 += r2".
 */
73 const char *const bpf_alu_string[16] = {
74 [BPF_ADD >> 4] = "+=",
75 [BPF_SUB >> 4] = "-=",
76 [BPF_MUL >> 4] = "*=",
77 [BPF_DIV >> 4] = "/=",
79 [BPF_AND >> 4] = "&=",
80 [BPF_LSH >> 4] = "<<=",
81 [BPF_RSH >> 4] = ">>=",
82 [BPF_NEG >> 4] = "neg",
83 [BPF_MOD >> 4] = "%=",
84 [BPF_XOR >> 4] = "^=",
/* arithmetic (sign-propagating) right shift */
86 [BPF_ARSH >> 4] = "s>>=",
87 [BPF_END >> 4] = "endian",
/*
 * Signed-variant mnemonics for div/mod, used instead of bpf_alu_string
 * when is_sdiv_smod() matches the instruction.
 */
90 static const char *const bpf_alu_sign_string[16] = {
91 [BPF_DIV >> 4] = "s/=",
92 [BPF_MOD >> 4] = "s%=",
/*
 * Suffixes for sign-extending moves (MOVSX), indexed by
 * (insn->off >> 3) - 1, i.e. off 8/16/32 -> slots 0/1/2 (see the
 * bpf_movsx_string use in print_bpf_insn). Entries are elided from
 * this view.
 */
95 static const char *const bpf_movsx_string[4] = {
/*
 * Operation names for atomic read-modify-write insns (BPF_ATOMIC mode),
 * indexed by BPF_OP(insn->imm) >> 4 — note: op comes from imm, not code.
 */
101 static const char *const bpf_atomic_alu_string[16] = {
102 [BPF_ADD >> 4] = "add",
103 [BPF_AND >> 4] = "and",
104 [BPF_OR >> 4] = "or",
105 [BPF_XOR >> 4] = "xor",
/*
 * Access-size type names for plain loads/stores, indexed by
 * BPF_SIZE(insn->code) >> 3. The BPF_B ("u8") entry is elided from
 * this view.
 */
108 static const char *const bpf_ldst_string[] = {
109 [BPF_W >> 3] = "u32",
110 [BPF_H >> 3] = "u16",
112 [BPF_DW >> 3] = "u64",
/*
 * Signed type names for sign-extending loads (BPF_MEMSX), indexed by
 * BPF_SIZE(insn->code) >> 3. The BPF_B ("s8") entry is elided from
 * this view.
 */
115 static const char *const bpf_ldsx_string[] = {
116 [BPF_W >> 3] = "s32",
117 [BPF_H >> 3] = "s16",
/*
 * Jump-condition mnemonics, indexed by BPF_OP(insn->code) >> 4.
 * "s"-prefixed entries are the signed comparisons.
 */
121 static const char *const bpf_jmp_string[16] = {
122 [BPF_JA >> 4] = "jmp",
123 [BPF_JEQ >> 4] = "==",
124 [BPF_JGT >> 4] = ">",
125 [BPF_JLT >> 4] = "<",
126 [BPF_JGE >> 4] = ">=",
127 [BPF_JLE >> 4] = "<=",
/* jump if (dst & src) != 0 */
128 [BPF_JSET >> 4] = "&",
129 [BPF_JNE >> 4] = "!=",
130 [BPF_JSGT >> 4] = "s>",
131 [BPF_JSLT >> 4] = "s<",
132 [BPF_JSGE >> 4] = "s>=",
133 [BPF_JSLE >> 4] = "s<=",
134 [BPF_CALL >> 4] = "call",
135 [BPF_EXIT >> 4] = "exit",
/*
 * print_bpf_end_insn - print a 32-bit-class endianness-conversion insn,
 * e.g. "(d4) r1 = le16 r1". Direction (be/le) comes from BPF_SRC in the
 * opcode; the conversion width comes from insn->imm. The private_data
 * parameter line is elided from this view.
 */
138 static void print_bpf_end_insn(bpf_insn_print_t verbose,
140 const struct bpf_insn *insn)
142 verbose(private_data, "(%02x) r%d = %s%d r%d\n",
143 insn->code, insn->dst_reg,
144 BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
/* dst_reg is both source and destination of the conversion */
145 insn->imm, insn->dst_reg);
/*
 * print_bpf_bswap_insn - print an unconditional byte swap (the ALU64
 * BPF_END encoding; see the class check in print_bpf_insn), e.g.
 * "(d7) r1 = bswap32 r1". Swap width comes from insn->imm.
 */
148 static void print_bpf_bswap_insn(bpf_insn_print_t verbose,
150 const struct bpf_insn *insn)
152 verbose(private_data, "(%02x) r%d = bswap%d r%d\n",
153 insn->code, insn->dst_reg,
154 insn->imm, insn->dst_reg);
/*
 * is_sdiv_smod - true when the insn is a signed division or modulo.
 * NOTE(review): the second half of the condition (the insn->off test
 * that distinguishes signed from unsigned encodings) is elided from
 * this view — confirm against the full file.
 */
157 static bool is_sdiv_smod(const struct bpf_insn *insn)
159 return (BPF_OP(insn->code) == BPF_DIV || BPF_OP(insn->code) == BPF_MOD) &&
/*
 * is_movsx - true when the insn is a sign-extending move: a BPF_MOV
 * whose off encodes the source width in bits (8, 16 or 32).
 */
163 static bool is_movsx(const struct bpf_insn *insn)
165 return BPF_OP(insn->code) == BPF_MOV &&
166 (insn->off == 8 || insn->off == 16 || insn->off == 32);
/*
 * print_bpf_insn - disassemble one BPF instruction through the verbose
 * callback in @cbs.
 *
 * Dispatches on BPF_CLASS(insn->code) and renders a C-like one-line
 * form, e.g. "(07) r1 += 2". @allow_ptr_leaks gates whether the raw
 * 64-bit immediate of a map-pointer ldimm64 is shown (the redacted
 * branch is elided from this view). Several else-arms and closing
 * braces are also elided; comments below describe only what is visible.
 */
169 void print_bpf_insn(const struct bpf_insn_cbs *cbs,
170 const struct bpf_insn *insn,
171 bool allow_ptr_leaks)
173 const bpf_insn_print_t verbose = cbs->cb_print;
174 u8 class = BPF_CLASS(insn->code);
/* ---- ALU / ALU64: arithmetic, logic, moves, endian ops ---- */
176 if (class == BPF_ALU || class == BPF_ALU64) {
177 if (BPF_OP(insn->code) == BPF_END) {
/* ALU64+END is bswap; ALU+END is a be/le conversion */
178 if (class == BPF_ALU64)
179 print_bpf_bswap_insn(verbose, cbs->private_data, insn);
181 print_bpf_end_insn(verbose, cbs->private_data, insn);
182 } else if (BPF_OP(insn->code) == BPF_NEG) {
/* 'w' prefix for 32-bit (ALU) registers, 'r' for 64-bit */
183 verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n",
184 insn->code, class == BPF_ALU ? 'w' : 'r',
185 insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
/* register-source ALU op (BPF_X) */
187 } else if (BPF_SRC(insn->code) == BPF_X) {
188 verbose(cbs->private_data, "(%02x) %c%d %s %s%c%d\n",
189 insn->code, class == BPF_ALU ? 'w' : 'r',
/* pick signed mnemonic for sdiv/smod, plain otherwise */
191 is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
192 : bpf_alu_string[BPF_OP(insn->code) >> 4],
/* movsx gets an extra width suffix (s8/s16/s32 slot) */
193 is_movsx(insn) ? bpf_movsx_string[(insn->off >> 3) - 1] : "",
194 class == BPF_ALU ? 'w' : 'r',
/* immediate-source ALU op (fallback arm; 'else' line elided) */
197 verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
198 insn->code, class == BPF_ALU ? 'w' : 'r',
200 is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
201 : bpf_alu_string[BPF_OP(insn->code) >> 4],
/* ---- STX: register stores and atomic read-modify-write ---- */
204 } else if (class == BPF_STX) {
205 if (BPF_MODE(insn->code) == BPF_MEM)
206 verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n",
208 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
210 insn->off, insn->src_reg);
/* plain (non-fetching) atomic op: add/and/or/xor, op in imm */
211 else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
212 (insn->imm == BPF_ADD || insn->imm == BPF_AND ||
213 insn->imm == BPF_OR || insn->imm == BPF_XOR)) {
214 verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) %s r%d\n",
216 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
217 insn->dst_reg, insn->off,
218 bpf_alu_string[BPF_OP(insn->imm) >> 4],
/* fetching atomic op: old value is returned in src_reg */
220 } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
221 (insn->imm == (BPF_ADD | BPF_FETCH) ||
222 insn->imm == (BPF_AND | BPF_FETCH) ||
223 insn->imm == (BPF_OR | BPF_FETCH) ||
224 insn->imm == (BPF_XOR | BPF_FETCH))) {
225 verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_%s((%s *)(r%d %+d), r%d)\n",
226 insn->code, insn->src_reg,
227 BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
228 bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4],
229 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
230 insn->dst_reg, insn->off, insn->src_reg);
/* compare-and-exchange: r0 is the implicit compare/result reg */
231 } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
232 insn->imm == BPF_CMPXCHG) {
233 verbose(cbs->private_data, "(%02x) r0 = atomic%s_cmpxchg((%s *)(r%d %+d), r0, r%d)\n",
235 BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
236 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
237 insn->dst_reg, insn->off,
/* unconditional exchange */
239 } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
240 insn->imm == BPF_XCHG) {
241 verbose(cbs->private_data, "(%02x) r%d = atomic%s_xchg((%s *)(r%d %+d), r%d)\n",
242 insn->code, insn->src_reg,
243 BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
244 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
245 insn->dst_reg, insn->off, insn->src_reg);
/* unrecognized STX encoding (fallback arm; 'else' line elided) */
247 verbose(cbs->private_data, "BUG_%02x\n", insn->code);
/* ---- ST: immediate stores and the non-UAPI nospec barrier ---- */
249 } else if (class == BPF_ST) {
250 if (BPF_MODE(insn->code) == BPF_MEM) {
251 verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
253 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
255 insn->off, insn->imm);
/* speculation barrier inserted by the verifier; no UAPI constant */
256 } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
257 verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
259 verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
/* ---- LDX: register loads, plain (MEM) or sign-extending (MEMSX) ---- */
261 } else if (class == BPF_LDX) {
262 if (BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) {
263 verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
/* unsigned type name for MEM, signed for MEMSX */
266 verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n",
267 insn->code, insn->dst_reg,
268 BPF_MODE(insn->code) == BPF_MEM ?
269 bpf_ldst_string[BPF_SIZE(insn->code) >> 3] :
270 bpf_ldsx_string[BPF_SIZE(insn->code) >> 3],
271 insn->src_reg, insn->off);
/* ---- LD: legacy skb loads and 64-bit immediate loads ---- */
272 } else if (class == BPF_LD) {
273 if (BPF_MODE(insn->code) == BPF_ABS) {
274 verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n",
276 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
278 } else if (BPF_MODE(insn->code) == BPF_IND) {
279 verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n",
281 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
282 insn->src_reg, insn->imm);
283 } else if (BPF_MODE(insn->code) == BPF_IMM &&
284 BPF_SIZE(insn->code) == BPF_DW) {
285 /* At this point, we already made sure that the second
286 * part of the ldimm64 insn is accessible.
/* reassemble the 64-bit immediate from the two 32-bit halves */
288 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
289 bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD ||
290 insn->src_reg == BPF_PSEUDO_MAP_VALUE;
/* don't reveal raw map pointers to unprivileged readers;
* the redacted-print branch is elided from this view */
293 if (is_ptr && !allow_ptr_leaks)
296 verbose(cbs->private_data, "(%02x) r%d = %s\n",
297 insn->code, insn->dst_reg,
298 __func_imm_name(cbs, insn, imm,
301 verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
/* ---- JMP / JMP32: calls, jumps, exit, conditional branches ---- */
304 } else if (class == BPF_JMP32 || class == BPF_JMP) {
305 u8 opcode = BPF_OP(insn->code);
307 if (opcode == BPF_CALL) {
/* BPF-to-BPF call: print the pc-relative target */
310 if (insn->src_reg == BPF_PSEUDO_CALL) {
311 verbose(cbs->private_data, "(%02x) call pc%s\n",
313 __func_get_name(cbs, insn,
/* helper/kfunc call: name plus the raw ID */
316 strcpy(tmp, "unknown");
317 verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code,
318 __func_get_name(cbs, insn,
/* JMP+JA: 16-bit offset; JMP32+JA ("gotol"): 32-bit imm offset */
322 } else if (insn->code == (BPF_JMP | BPF_JA)) {
323 verbose(cbs->private_data, "(%02x) goto pc%+d\n",
324 insn->code, insn->off);
325 } else if (insn->code == (BPF_JMP32 | BPF_JA)) {
326 verbose(cbs->private_data, "(%02x) gotol pc%+d\n",
327 insn->code, insn->imm);
328 } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
329 verbose(cbs->private_data, "(%02x) exit\n", insn->code);
/* conditional branch, register source */
330 } else if (BPF_SRC(insn->code) == BPF_X) {
331 verbose(cbs->private_data,
332 "(%02x) if %c%d %s %c%d goto pc%+d\n",
333 insn->code, class == BPF_JMP32 ? 'w' : 'r',
335 bpf_jmp_string[BPF_OP(insn->code) >> 4],
336 class == BPF_JMP32 ? 'w' : 'r',
337 insn->src_reg, insn->off);
/* conditional branch, immediate source ('else' line elided) */
339 verbose(cbs->private_data,
340 "(%02x) if %c%d %s 0x%x goto pc%+d\n",
341 insn->code, class == BPF_JMP32 ? 'w' : 'r',
343 bpf_jmp_string[BPF_OP(insn->code) >> 4],
344 insn->imm, insn->off);
/* unknown class: just print its name ('else' arm; line elided) */
347 verbose(cbs->private_data, "(%02x) %s\n",
348 insn->code, bpf_class_string[class]);