/*
 * NOTE(review): every line below starts with a stray number — the original
 * file's line numbers appear fused into the text by extraction. Entry braces,
 * ".insns = {", BPF_EXIT_INSN()s and ".result" fields are also missing from
 * this view; restore from the pristine source before compiling.
 */
/* Socket lookup result is copied into r6 and never released; verifier must
 * flag the unreleased reference (see .errstr). */
2 "reference tracking: leak potential reference",
4 BPF_SK_LOOKUP(sk_lookup_tcp),
5 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
8 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9 .errstr = "Unreleased reference",
/* Same leak pattern for the sock_common-returning lookup variant. */
13 "reference tracking: leak potential reference to sock_common",
15 BPF_SK_LOOKUP(skc_lookup_tcp),
16 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
19 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
20 .errstr = "Unreleased reference",
/* Spilling the unchecked socket pointer to the stack (fp-8) does not count
 * as releasing it — still an unreleased reference. */
24 "reference tracking: leak potential reference on stack",
26 BPF_SK_LOOKUP(sk_lookup_tcp),
27 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
28 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
29 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
30 BPF_MOV64_IMM(BPF_REG_0, 0),
33 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
34 .errstr = "Unreleased reference",
/* As above, but the spilled slot is then overwritten with 0 — zeroing the
 * stack copy must not hide the leak. */
38 "reference tracking: leak potential reference on stack 2",
40 BPF_SK_LOOKUP(sk_lookup_tcp),
41 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
42 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
43 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
44 BPF_MOV64_IMM(BPF_REG_0, 0),
45 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
48 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
49 .errstr = "Unreleased reference",
/* Overwriting the only register holding the (possibly non-NULL) socket with
 * an immediate 0 drops the reference without releasing it. */
53 "reference tracking: zero potential reference",
55 BPF_SK_LOOKUP(sk_lookup_tcp),
56 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
59 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
60 .errstr = "Unreleased reference",
/* Same zeroing leak for the sock_common-returning lookup variant. */
64 "reference tracking: zero potential reference to sock_common",
66 BPF_SK_LOOKUP(skc_lookup_tcp),
67 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
70 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
71 .errstr = "Unreleased reference",
/* Copying the pointer to r7 and then zeroing both copies still leaks: no
 * live copy remains, yet the reference was never released. */
75 "reference tracking: copy and zero potential references",
77 BPF_SK_LOOKUP(sk_lookup_tcp),
78 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
79 BPF_MOV64_IMM(BPF_REG_0, 0),
80 BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
83 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
84 .errstr = "Unreleased reference",
/* bpf_sk_release() on a pointer that was never NULL-checked: the verifier
 * must reject the *_or_null type where a checked sock is expected. */
88 "reference tracking: release reference without check",
90 BPF_SK_LOOKUP(sk_lookup_tcp),
91 /* reference in r0 may be NULL */
92 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
93 BPF_MOV64_IMM(BPF_REG_2, 0),
94 BPF_EMIT_CALL(BPF_FUNC_sk_release),
97 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
98 .errstr = "type=sock_or_null expected=sock",
/* Same unchecked release for the sock_common-returning lookup. */
102 "reference tracking: release reference to sock_common without check",
104 BPF_SK_LOOKUP(skc_lookup_tcp),
105 /* reference in r0 may be NULL */
106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
107 BPF_MOV64_IMM(BPF_REG_2, 0),
108 BPF_EMIT_CALL(BPF_FUNC_sk_release),
111 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
112 .errstr = "type=sock_common_or_null expected=sock",
/* Correct pattern: NULL-check then release. No .errstr visible — presumably
 * .result = ACCEPT in the missing lines; TODO confirm against pristine src. */
116 "reference tracking: release reference",
118 BPF_SK_LOOKUP(sk_lookup_tcp),
119 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
120 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
121 BPF_EMIT_CALL(BPF_FUNC_sk_release),
124 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Same accepted pattern for the sock_common-returning lookup. */
128 "reference tracking: release reference to sock_common",
130 BPF_SK_LOOKUP(skc_lookup_tcp),
131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
132 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
133 BPF_EMIT_CALL(BPF_FUNC_sk_release),
136 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Inverted check (JNE) with the release on the taken path. */
140 "reference tracking: release reference 2",
142 BPF_SK_LOOKUP(sk_lookup_tcp),
143 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
144 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
146 BPF_EMIT_CALL(BPF_FUNC_sk_release),
149 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Releasing the same socket twice: after the first release the saved copy in
 * r6 is no longer a valid sock — verifier rejects (type=inv expected=sock). */
153 "reference tracking: release reference twice",
155 BPF_SK_LOOKUP(sk_lookup_tcp),
156 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
157 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
159 BPF_EMIT_CALL(BPF_FUNC_sk_release),
160 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
161 BPF_EMIT_CALL(BPF_FUNC_sk_release),
164 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
165 .errstr = "type=inv expected=sock",
/* Same double release, but both releases live inside one branch arm. */
169 "reference tracking: release reference twice inside branch",
171 BPF_SK_LOOKUP(sk_lookup_tcp),
172 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
173 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
175 BPF_EMIT_CALL(BPF_FUNC_sk_release),
176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
177 BPF_EMIT_CALL(BPF_FUNC_sk_release),
180 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
181 .errstr = "type=inv expected=sock",
/* Branch on an unrelated value (skb->mark) after the lookup: only one arm
 * releases the socket, so the mark!=0 path leaks — must be rejected. */
185 "reference tracking: alloc, check, free in one subbranch",
187 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
188 offsetof(struct __sk_buff, data)),
189 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
190 offsetof(struct __sk_buff, data_end)),
191 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
193 /* if (offsetof(skb, mark) > data_len) exit; */
194 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
196 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
197 offsetof(struct __sk_buff, mark)),
198 BPF_SK_LOOKUP(sk_lookup_tcp),
199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
200 /* Leak reference in R0 */
202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
203 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
204 BPF_EMIT_CALL(BPF_FUNC_sk_release),
207 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
208 .errstr = "Unreleased reference",
210 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Same structure, but both arms of the mark branch check-and-release — no
 * .errstr visible, so presumably accepted; TODO confirm missing .result. */
213 "reference tracking: alloc, check, free in both subbranches",
215 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
216 offsetof(struct __sk_buff, data)),
217 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
218 offsetof(struct __sk_buff, data_end)),
219 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
221 /* if (offsetof(skb, mark) > data_len) exit; */
222 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
224 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
225 offsetof(struct __sk_buff, mark)),
226 BPF_SK_LOOKUP(sk_lookup_tcp),
227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
228 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
229 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
230 BPF_EMIT_CALL(BPF_FUNC_sk_release),
232 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
234 BPF_EMIT_CALL(BPF_FUNC_sk_release),
237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
239 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Reference acquired in caller, checked and released inside a BPF-to-BPF
 * subprog (the BPF_CALL with src_reg=1) — no .errstr; presumably accepted. */
242 "reference tracking in call: free reference in subprog",
244 BPF_SK_LOOKUP(sk_lookup_tcp),
245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
246 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
247 BPF_MOV64_IMM(BPF_REG_0, 0),
251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
253 BPF_EMIT_CALL(BPF_FUNC_sk_release),
256 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Released once in the subprog, then again by the caller via r6 — double
 * release; rejected with type=inv expected=sock. */
260 "reference tracking in call: free reference in subprog and outside",
262 BPF_SK_LOOKUP(sk_lookup_tcp),
263 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
264 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
266 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
267 BPF_EMIT_CALL(BPF_FUNC_sk_release),
271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
272 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
273 BPF_EMIT_CALL(BPF_FUNC_sk_release),
276 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
277 .errstr = "type=inv expected=sock",
/* Subprog acquires the socket and spills it into the caller's stack slot
 * without anyone releasing it — unreleased reference. */
281 "reference tracking in call: alloc & leak reference in subprog",
283 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
286 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
287 BPF_MOV64_IMM(BPF_REG_0, 0),
291 BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
292 BPF_SK_LOOKUP(sk_lookup_tcp),
293 /* spill unchecked sk_ptr into stack of caller */
294 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
298 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
299 .errstr = "Unreleased reference",
/* Subprog returns the acquired socket; caller checks and releases it — no
 * .errstr visible; .retval = POINTER_VALUE suggests acceptance. */
303 "reference tracking in call: alloc in subprog, release outside",
305 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
306 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
308 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
309 BPF_EMIT_CALL(BPF_FUNC_sk_release),
313 BPF_SK_LOOKUP(sk_lookup_tcp),
314 BPF_EXIT_INSN(), /* return sk */
316 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
317 .retval = POINTER_VALUE,
/* Nested subprogs: innermost spills an unchecked sk pointer through a
 * pointer-to-caller-stack and nobody releases it — unreleased reference. */
321 "reference tracking in call: sk_ptr leak into caller stack",
323 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
325 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
326 BPF_MOV64_IMM(BPF_REG_0, 0),
330 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
332 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
333 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
334 /* spill unchecked sk_ptr into stack of caller */
335 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
337 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
338 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
342 BPF_SK_LOOKUP(sk_lookup_tcp),
345 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
346 .errstr = "Unreleased reference",
/* Same spill-through-caller-stack shape, but the middle frame reloads the
 * pointer, NULL-checks it and releases — no .errstr; presumably accepted. */
350 "reference tracking in call: sk_ptr spill into caller stack",
352 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
355 BPF_MOV64_IMM(BPF_REG_0, 0),
359 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
361 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
363 /* spill unchecked sk_ptr into stack of caller */
364 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
366 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
367 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
368 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
369 /* now the sk_ptr is verified, free the reference */
370 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
371 BPF_EMIT_CALL(BPF_FUNC_sk_release),
375 BPF_SK_LOOKUP(sk_lookup_tcp),
378 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* LD_ABS after the reference has been released is fine — no .errstr;
 * presumably accepted. */
382 "reference tracking: allow LD_ABS",
384 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
385 BPF_SK_LOOKUP(sk_lookup_tcp),
386 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
387 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
388 BPF_EMIT_CALL(BPF_FUNC_sk_release),
389 BPF_LD_ABS(BPF_B, 0),
390 BPF_LD_ABS(BPF_H, 0),
391 BPF_LD_ABS(BPF_W, 0),
394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* LD_ABS while a socket reference is still held must be rejected (LD_ABS
 * has implicit exit paths that would leak the reference). */
398 "reference tracking: forbid LD_ABS while holding reference",
400 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
401 BPF_SK_LOOKUP(sk_lookup_tcp),
402 BPF_LD_ABS(BPF_B, 0),
403 BPF_LD_ABS(BPF_H, 0),
404 BPF_LD_ABS(BPF_W, 0),
405 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
406 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
407 BPF_EMIT_CALL(BPF_FUNC_sk_release),
410 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
411 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
/* LD_IND after release is fine — no .errstr; presumably accepted. */
415 "reference tracking: allow LD_IND",
417 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
418 BPF_SK_LOOKUP(sk_lookup_tcp),
419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
420 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
421 BPF_EMIT_CALL(BPF_FUNC_sk_release),
422 BPF_MOV64_IMM(BPF_REG_7, 1),
423 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
424 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
427 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* LD_IND while the reference (saved in r4) is still held — rejected. */
432 "reference tracking: forbid LD_IND while holding reference",
434 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
435 BPF_SK_LOOKUP(sk_lookup_tcp),
436 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
437 BPF_MOV64_IMM(BPF_REG_7, 1),
438 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
440 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
441 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
442 BPF_EMIT_CALL(BPF_FUNC_sk_release),
445 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
446 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
/* tail_call only on the sk==NULL path (no reference held) — no .errstr;
 * presumably accepted. fixup_prog1 patches the map fd at insn 17. */
450 "reference tracking: check reference or tail call",
452 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
453 BPF_SK_LOOKUP(sk_lookup_tcp),
454 /* if (sk) bpf_sk_release() */
455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
456 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
457 /* bpf_tail_call() */
458 BPF_MOV64_IMM(BPF_REG_3, 3),
459 BPF_LD_MAP_FD(BPF_REG_2, 0),
460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
461 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
462 BPF_MOV64_IMM(BPF_REG_0, 0),
464 BPF_EMIT_CALL(BPF_FUNC_sk_release),
467 .fixup_prog1 = { 17 },
468 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Release first, then tail_call with no reference outstanding — presumably
 * accepted (no .errstr visible). */
472 "reference tracking: release reference then tail call",
474 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
475 BPF_SK_LOOKUP(sk_lookup_tcp),
476 /* if (sk) bpf_sk_release() */
477 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
478 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
479 BPF_EMIT_CALL(BPF_FUNC_sk_release),
480 /* bpf_tail_call() */
481 BPF_MOV64_IMM(BPF_REG_3, 3),
482 BPF_LD_MAP_FD(BPF_REG_2, 0),
483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
485 BPF_MOV64_IMM(BPF_REG_0, 0),
488 .fixup_prog1 = { 18 },
489 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* tail_call while a possibly-live reference sits in r6: the tail call never
 * returns, so the release after it is unreachable — rejected. */
493 "reference tracking: leak possible reference over tail call",
495 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
496 /* Look up socket and store in REG_6 */
497 BPF_SK_LOOKUP(sk_lookup_tcp),
498 /* bpf_tail_call() */
499 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
500 BPF_MOV64_IMM(BPF_REG_3, 3),
501 BPF_LD_MAP_FD(BPF_REG_2, 0),
502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
504 BPF_MOV64_IMM(BPF_REG_0, 0),
505 /* if (sk) bpf_sk_release() */
506 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
508 BPF_EMIT_CALL(BPF_FUNC_sk_release),
511 .fixup_prog1 = { 16 },
512 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
513 .errstr = "tail_call would lead to reference leak",
/* Even a NULL-checked (definitely live) reference over a tail_call leaks. */
517 "reference tracking: leak checked reference over tail call",
519 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
520 /* Look up socket and store in REG_6 */
521 BPF_SK_LOOKUP(sk_lookup_tcp),
522 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
523 /* if (!sk) goto end */
524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
525 /* bpf_tail_call() */
526 BPF_MOV64_IMM(BPF_REG_3, 0),
527 BPF_LD_MAP_FD(BPF_REG_2, 0),
528 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
530 BPF_MOV64_IMM(BPF_REG_0, 0),
531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
532 BPF_EMIT_CALL(BPF_FUNC_sk_release),
535 .fixup_prog1 = { 17 },
536 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
537 .errstr = "tail_call would lead to reference leak",
/* Pointer arithmetic on the not-yet-checked lookup result (sock_or_null)
 * is prohibited — verifier rejects before the release is even reached. */
541 "reference tracking: mangle and release sock_or_null",
543 BPF_SK_LOOKUP(sk_lookup_tcp),
544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
546 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
547 BPF_EMIT_CALL(BPF_FUNC_sk_release),
550 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
551 .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
/* Arithmetic on a checked sock pointer is equally prohibited. */
555 "reference tracking: mangle and release sock",
557 BPF_SK_LOOKUP(sk_lookup_tcp),
558 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
561 BPF_EMIT_CALL(BPF_FUNC_sk_release),
564 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
565 .errstr = "R1 pointer arithmetic on sock prohibited",
/* 4-byte read at offset 4 of a checked sock, then release — no .errstr
 * visible; presumably accepted. */
569 "reference tracking: access member",
571 BPF_SK_LOOKUP(sk_lookup_tcp),
572 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
574 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
575 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
576 BPF_EMIT_CALL(BPF_FUNC_sk_release),
579 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Stores into bpf_sock fields are not allowed for this prog type. */
583 "reference tracking: write to member",
585 BPF_SK_LOOKUP(sk_lookup_tcp),
586 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
588 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
589 BPF_LD_IMM64(BPF_REG_2, 42),
590 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
591 offsetof(struct bpf_sock, mark)),
592 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
593 BPF_EMIT_CALL(BPF_FUNC_sk_release),
594 BPF_LD_IMM64(BPF_REG_0, 0),
597 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
598 .errstr = "cannot write into sock",
/* 8-byte read at offset 0 exceeds the permitted access size — rejected. */
602 "reference tracking: invalid 64-bit access of member",
604 BPF_SK_LOOKUP(sk_lookup_tcp),
605 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
606 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
607 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
609 BPF_EMIT_CALL(BPF_FUNC_sk_release),
612 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
613 .errstr = "invalid sock access off=0 size=8",
/* Reading through r1 after bpf_sk_release(r1): register is dead (!read_ok). */
617 "reference tracking: access after release",
619 BPF_SK_LOOKUP(sk_lookup_tcp),
620 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
622 BPF_EMIT_CALL(BPF_FUNC_sk_release),
623 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
626 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
627 .errstr = "!read_ok",
/* Build the lookup tuple directly from packet data (skb->data) after a
 * 64-byte bounds check, then check/read/release — no .errstr visible;
 * presumably accepted. */
631 "reference tracking: direct access for lookup",
633 /* Check that the packet is at least 64B long */
634 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
635 offsetof(struct __sk_buff, data)),
636 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
637 offsetof(struct __sk_buff, data_end)),
638 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
640 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
641 /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
642 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
643 BPF_MOV64_IMM(BPF_REG_4, 0),
644 BPF_MOV64_IMM(BPF_REG_5, 0),
645 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
646 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
648 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
649 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
650 BPF_EMIT_CALL(BPF_FUNC_sk_release),
653 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Pointer obtained via bpf_tcp_sock(sk) becomes invalid once the parent sk
 * is released; the later load through r7 must fail (invalid mem access). */
657 "reference tracking: use ptr from bpf_tcp_sock() after release",
659 BPF_SK_LOOKUP(sk_lookup_tcp),
660 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
662 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
663 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
664 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
665 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
666 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
667 BPF_EMIT_CALL(BPF_FUNC_sk_release),
669 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
670 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
671 BPF_EMIT_CALL(BPF_FUNC_sk_release),
672 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
675 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
677 .errstr = "invalid mem access",
678 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Same invalidation for a pointer from bpf_sk_fullsock(sk). */
681 "reference tracking: use ptr from bpf_sk_fullsock() after release",
683 BPF_SK_LOOKUP(sk_lookup_tcp),
684 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
686 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
688 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
689 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
690 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
691 BPF_EMIT_CALL(BPF_FUNC_sk_release),
693 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
694 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
695 BPF_EMIT_CALL(BPF_FUNC_sk_release),
696 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
699 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
701 .errstr = "invalid mem access",
702 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Two levels of derivation: fullsock(tcp_sock(sk)); still invalid after the
 * parent sk is released. */
705 "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
707 BPF_SK_LOOKUP(sk_lookup_tcp),
708 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
710 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
711 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
712 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
713 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
714 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
715 BPF_EMIT_CALL(BPF_FUNC_sk_release),
717 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
718 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
720 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
721 BPF_EMIT_CALL(BPF_FUNC_sk_release),
722 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
724 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
727 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
729 .errstr = "invalid mem access",
730 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Using the original sk (r6) after releasing via the derived tp pointer. */
733 "reference tracking: use sk after bpf_sk_release(tp)",
735 BPF_SK_LOOKUP(sk_lookup_tcp),
736 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
738 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
739 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
740 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
741 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
742 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
743 BPF_EMIT_CALL(BPF_FUNC_sk_release),
745 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
746 BPF_EMIT_CALL(BPF_FUNC_sk_release),
747 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
750 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
752 .errstr = "invalid mem access",
753 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Reading through the bpf_get_listener_sock() result after releasing the
 * parent sk — no .errstr visible here; TODO confirm expected .result. */
756 "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
758 BPF_SK_LOOKUP(sk_lookup_tcp),
759 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
761 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
762 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
763 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
764 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
765 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
766 BPF_EMIT_CALL(BPF_FUNC_sk_release),
768 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
769 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
770 BPF_EMIT_CALL(BPF_FUNC_sk_release),
771 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
774 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* The listener sock returned by bpf_get_listener_sock() is not a tracked
 * reference, so releasing it is rejected ("reference has not been
 * acquired before"). */
778 "reference tracking: bpf_sk_release(listen_sk)",
780 BPF_SK_LOOKUP(sk_lookup_tcp),
781 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
783 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
784 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
785 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
786 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
788 BPF_EMIT_CALL(BPF_FUNC_sk_release),
790 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
791 BPF_EMIT_CALL(BPF_FUNC_sk_release),
792 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
793 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
794 BPF_EMIT_CALL(BPF_FUNC_sk_release),
797 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
799 .errstr = "reference has not been acquired before",
802 /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
/* Only r7 (fullsock result) is NULL-checked; r8 (tcp_sock result) is
 * dereferenced without its own check — invalid mem access. */
803 "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
805 BPF_SK_LOOKUP(sk_lookup_tcp),
806 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
808 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
809 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
810 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
811 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
812 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
813 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
814 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
815 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
816 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
817 BPF_EMIT_CALL(BPF_FUNC_sk_release),
819 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
820 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
821 BPF_EMIT_CALL(BPF_FUNC_sk_release),
824 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
826 .errstr = "invalid mem access",
/* Two consecutive NULL comparisons on r6: verifier must track that the
 * second JEQ's outcome is already known — no .errstr; presumably accepted. */
829 "reference tracking: branch tracking valid pointer null comparison",
831 BPF_SK_LOOKUP(sk_lookup_tcp),
832 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
833 BPF_MOV64_IMM(BPF_REG_3, 1),
834 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
835 BPF_MOV64_IMM(BPF_REG_3, 0),
836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2),
837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
838 BPF_EMIT_CALL(BPF_FUNC_sk_release),
841 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Comparing the pointer against 1234 must NOT be treated as a NULL check:
 * the skip path leaks the reference — rejected. */
845 "reference tracking: branch tracking valid pointer value comparison",
847 BPF_SK_LOOKUP(sk_lookup_tcp),
848 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
849 BPF_MOV64_IMM(BPF_REG_3, 1),
850 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4),
851 BPF_MOV64_IMM(BPF_REG_3, 0),
852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 1234, 2),
853 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
854 BPF_EMIT_CALL(BPF_FUNC_sk_release),
857 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
858 .errstr = "Unreleased reference",
/* BTF-typed tcp_sock from bpf_skc_to_tcp_sock() released via
 * bpf_sk_release(). Unprivileged load fails with "unknown func" (helper
 * unavailable); no privileged .errstr visible — TODO confirm .result. */
862 "reference tracking: bpf_sk_release(btf_tcp_sock)",
864 BPF_SK_LOOKUP(sk_lookup_tcp),
865 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
867 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
868 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
869 BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
870 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
872 BPF_EMIT_CALL(BPF_FUNC_sk_release),
874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
875 BPF_EMIT_CALL(BPF_FUNC_sk_release),
878 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
880 .result_unpriv = REJECT,
881 .errstr_unpriv = "unknown func",
/* Reading through the skc_to_tcp_sock() result (r7) after the parent sk
 * was released — invalid mem access. */
884 "reference tracking: use ptr from bpf_skc_to_tcp_sock() after release",
886 BPF_SK_LOOKUP(sk_lookup_tcp),
887 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
889 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
890 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
891 BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
892 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
894 BPF_EMIT_CALL(BPF_FUNC_sk_release),
896 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
897 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
898 BPF_EMIT_CALL(BPF_FUNC_sk_release),
899 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0),
902 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
904 .errstr = "invalid mem access",
905 .result_unpriv = REJECT,
906 .errstr_unpriv = "unknown func",
/* Reserve a ringbuf record (r8), discard it, then try to store the released
 * pointer into a map value. Unprivileged load must reject with "R8 !read_ok"
 * (released register unreadable); fixups patch the two map fds. */
909 "reference tracking: try to leak released ptr reg",
911 BPF_MOV64_IMM(BPF_REG_0, 0),
912 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
913 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
915 BPF_LD_MAP_FD(BPF_REG_1, 0),
916 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
917 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
919 BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
921 BPF_MOV64_IMM(BPF_REG_0, 0),
922 BPF_LD_MAP_FD(BPF_REG_1, 0),
923 BPF_MOV64_IMM(BPF_REG_2, 8),
924 BPF_MOV64_IMM(BPF_REG_3, 0),
925 BPF_EMIT_CALL(BPF_FUNC_ringbuf_reserve),
926 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
928 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
930 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
931 BPF_MOV64_IMM(BPF_REG_2, 0),
932 BPF_EMIT_CALL(BPF_FUNC_ringbuf_discard),
933 BPF_MOV64_IMM(BPF_REG_0, 0),
935 BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0),
938 .fixup_map_array_48b = { 4 },
939 .fixup_map_ringbuf = { 11 },
941 .result_unpriv = REJECT,
942 .errstr_unpriv = "R8 !read_ok"