1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
8 #ifndef _UAPI__LINUX_BPF_H__
9 #define _UAPI__LINUX_BPF_H__
11 #include <linux/types.h>
12 #include <linux/bpf_common.h>
14 /* Extended instruction set based on top of classic BPF */
16 /* instruction classes */
17 #define BPF_JMP32 0x06 /* jmp mode in word width */
18 #define BPF_ALU64 0x07 /* alu mode in double word width */
21 #define BPF_DW 0x18 /* double word (64-bit) */
22 #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
23 #define BPF_XADD 0xc0 /* exclusive add - legacy name */
26 #define BPF_MOV 0xb0 /* mov reg to reg */
27 #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
29 /* change endianness of a register */
30 #define BPF_END 0xd0 /* flags for endianness conversion: */
31 #define BPF_TO_LE 0x00 /* convert to little-endian */
32 #define BPF_TO_BE 0x08 /* convert to big-endian */
33 #define BPF_FROM_LE BPF_TO_LE
34 #define BPF_FROM_BE BPF_TO_BE
37 #define BPF_JNE 0x50 /* jump != */
38 #define BPF_JLT 0xa0 /* LT is unsigned, '<' */
39 #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
40 #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
41 #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
42 #define BPF_JSLT 0xc0 /* SLT is signed, '<' */
43 #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
44 #define BPF_CALL 0x80 /* function call */
45 #define BPF_EXIT 0x90 /* function return */
47 /* atomic op type fields (stored in immediate) */
48 #define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */
49 #define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
50 #define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
52 /* Register numbers */
68 /* BPF has 10 general purpose 64-bit registers and stack frame. */
69 #define MAX_BPF_REG __MAX_BPF_REG
72 __u8 code; /* opcode */
73 __u8 dst_reg:4; /* dest register */
74 __u8 src_reg:4; /* source register */
75 __s16 off; /* signed offset */
76 __s32 imm; /* signed immediate constant */
79 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
80 struct bpf_lpm_trie_key {
81 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
82 __u8 data[0]; /* Arbitrary size */
85 struct bpf_cgroup_storage_key {
86 __u64 cgroup_inode_id; /* cgroup inode id */
87 __u32 attach_type; /* program attach type (enum bpf_attach_type) */
90 union bpf_iter_link_info {
96 /* BPF syscall commands, see bpf(2) man-page for more details. */
98 * DOC: eBPF Syscall Preamble
100 * The operation to be performed by the **bpf**\ () system call is determined
101 * by the *cmd* argument. Each operation takes an accompanying argument,
102 * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
103 * below). The size argument is the size of the union pointed to by *attr*.
106 * DOC: eBPF Syscall Commands
110 * Create a map and return a file descriptor that refers to the
111 * map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
112 * is automatically enabled for the new file descriptor.
114 * Applying **close**\ (2) to the file descriptor returned by
115 * **BPF_MAP_CREATE** will delete the map (but see NOTES).
118 * A new file descriptor (a nonnegative integer), or -1 if an
119 * error occurred (in which case, *errno* is set appropriately).
121 * BPF_MAP_LOOKUP_ELEM
123 * Look up an element with a given *key* in the map referred to
124 * by the file descriptor *map_fd*.
126 * The *flags* argument may be specified as one of the
130 * Look up the value of a spin-locked map without
131 * returning the lock. This must be specified if the
132 * elements contain a spinlock.
135 * Returns zero on success. On error, -1 is returned and *errno*
136 * is set appropriately.
138 * BPF_MAP_UPDATE_ELEM
140 * Create or update an element (key/value pair) in a specified map.
142 * The *flags* argument should be specified as one of the
146 * Create a new element or update an existing element.
148 * Create a new element only if it did not exist.
150 * Update an existing element.
152 * Update a spin_lock-ed map element.
155 * Returns zero on success. On error, -1 is returned and *errno*
156 * is set appropriately.
158 * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
159 * **E2BIG**, **EEXIST**, or **ENOENT**.
162 * The number of elements in the map reached the
163 * *max_entries* limit specified at map creation time.
165 * If *flags* specifies **BPF_NOEXIST** and the element
166 * with *key* already exists in the map.
168 * If *flags* specifies **BPF_EXIST** and the element with
169 * *key* does not exist in the map.
171 * BPF_MAP_DELETE_ELEM
173 * Look up and delete an element by key in a specified map.
176 * Returns zero on success. On error, -1 is returned and *errno*
177 * is set appropriately.
179 * BPF_MAP_GET_NEXT_KEY
181 * Look up an element by key in a specified map and return the key
182 * of the next element. Can be used to iterate over all elements
186 * Returns zero on success. On error, -1 is returned and *errno*
187 * is set appropriately.
189 * The following cases can be used to iterate over all elements of
192 * * If *key* is not found, the operation returns zero and sets
193 * the *next_key* pointer to the key of the first element.
194 * * If *key* is found, the operation returns zero and sets the
195 * *next_key* pointer to the key of the next element.
196 * * If *key* is the last element, returns -1 and *errno* is set
199 * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
200 * **EINVAL** on error.
204 * Verify and load an eBPF program, returning a new file
205 * descriptor associated with the program.
207 * Applying **close**\ (2) to the file descriptor returned by
208 * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
210 * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
211 * automatically enabled for the new file descriptor.
214 * A new file descriptor (a nonnegative integer), or -1 if an
215 * error occurred (in which case, *errno* is set appropriately).
219 * Pin an eBPF program or map referred by the specified *bpf_fd*
220 * to the provided *pathname* on the filesystem.
222 * The *pathname* argument must not contain a dot (".").
224 * On success, *pathname* retains a reference to the eBPF object,
225 * preventing deallocation of the object when the original
226 * *bpf_fd* is closed. This allow the eBPF object to live beyond
227 * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
230 * Applying **unlink**\ (2) or similar calls to the *pathname*
231 * unpins the object from the filesystem, removing the reference.
232 * If no other file descriptors or filesystem nodes refer to the
233 * same object, it will be deallocated (see NOTES).
235 * The filesystem type for the parent directory of *pathname* must
236 * be **BPF_FS_MAGIC**.
239 * Returns zero on success. On error, -1 is returned and *errno*
240 * is set appropriately.
244 * Open a file descriptor for the eBPF object pinned to the
245 * specified *pathname*.
248 * A new file descriptor (a nonnegative integer), or -1 if an
249 * error occurred (in which case, *errno* is set appropriately).
253 * Attach an eBPF program to a *target_fd* at the specified
254 * *attach_type* hook.
256 * The *attach_type* specifies the eBPF attachment point to
257 * attach the program to, and must be one of *bpf_attach_type*
260 * The *attach_bpf_fd* must be a valid file descriptor for a
261 * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
262 * or sock_ops type corresponding to the specified *attach_type*.
264 * The *target_fd* must be a valid file descriptor for a kernel
265 * object which depends on the attach type of *attach_bpf_fd*:
267 * **BPF_PROG_TYPE_CGROUP_DEVICE**,
268 * **BPF_PROG_TYPE_CGROUP_SKB**,
269 * **BPF_PROG_TYPE_CGROUP_SOCK**,
270 * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
271 * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
272 * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
273 * **BPF_PROG_TYPE_SOCK_OPS**
275 * Control Group v2 hierarchy with the eBPF controller
276 * enabled. Requires the kernel to be compiled with
277 * **CONFIG_CGROUP_BPF**.
279 * **BPF_PROG_TYPE_FLOW_DISSECTOR**
281 * Network namespace (eg /proc/self/ns/net).
283 * **BPF_PROG_TYPE_LIRC_MODE2**
285 * LIRC device path (eg /dev/lircN). Requires the kernel
286 * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
288 * **BPF_PROG_TYPE_SK_SKB**,
289 * **BPF_PROG_TYPE_SK_MSG**
291 * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
294 * Returns zero on success. On error, -1 is returned and *errno*
295 * is set appropriately.
299 * Detach the eBPF program associated with the *target_fd* at the
300 * hook specified by *attach_type*. The program must have been
301 * previously attached using **BPF_PROG_ATTACH**.
304 * Returns zero on success. On error, -1 is returned and *errno*
305 * is set appropriately.
309 * Run the eBPF program associated with the *prog_fd* a *repeat*
310 * number of times against a provided program context *ctx_in* and
311 * data *data_in*, and return the modified program context
312 * *ctx_out*, *data_out* (for example, packet data), result of the
313 * execution *retval*, and *duration* of the test run.
315 * The sizes of the buffers provided as input and output
316 * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
317 * be provided in the corresponding variables *ctx_size_in*,
318 * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
319 * of these parameters are not provided (ie set to NULL), the
320 * corresponding size field must be zero.
322 * Some program types have particular requirements:
324 * **BPF_PROG_TYPE_SK_LOOKUP**
325 * *data_in* and *data_out* must be NULL.
327 * **BPF_PROG_TYPE_RAW_TRACEPOINT**,
328 * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
330 * *ctx_out*, *data_in* and *data_out* must be NULL.
331 * *repeat* must be zero.
333 * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
336 * Returns zero on success. On error, -1 is returned and *errno*
337 * is set appropriately.
340 * Either *data_size_out* or *ctx_size_out* is too small.
342 * This command is not supported by the program type of
343 * the program referred to by *prog_fd*.
345 * BPF_PROG_GET_NEXT_ID
347 * Fetch the next eBPF program currently loaded into the kernel.
349 * Looks for the eBPF program with an id greater than *start_id*
350 * and updates *next_id* on success. If no other eBPF programs
351 * remain with ids higher than *start_id*, returns -1 and sets
352 * *errno* to **ENOENT**.
355 * Returns zero on success. On error, or when no id remains, -1
356 * is returned and *errno* is set appropriately.
358 * BPF_MAP_GET_NEXT_ID
360 * Fetch the next eBPF map currently loaded into the kernel.
362 * Looks for the eBPF map with an id greater than *start_id*
363 * and updates *next_id* on success. If no other eBPF maps
364 * remain with ids higher than *start_id*, returns -1 and sets
365 * *errno* to **ENOENT**.
368 * Returns zero on success. On error, or when no id remains, -1
369 * is returned and *errno* is set appropriately.
371 * BPF_PROG_GET_FD_BY_ID
373 * Open a file descriptor for the eBPF program corresponding to
377 * A new file descriptor (a nonnegative integer), or -1 if an
378 * error occurred (in which case, *errno* is set appropriately).
380 * BPF_MAP_GET_FD_BY_ID
382 * Open a file descriptor for the eBPF map corresponding to
386 * A new file descriptor (a nonnegative integer), or -1 if an
387 * error occurred (in which case, *errno* is set appropriately).
389 * BPF_OBJ_GET_INFO_BY_FD
391 * Obtain information about the eBPF object corresponding to
394 * Populates up to *info_len* bytes of *info*, which will be in
395 * one of the following formats depending on the eBPF object type
398 * * **struct bpf_prog_info**
399 * * **struct bpf_map_info**
400 * * **struct bpf_btf_info**
401 * * **struct bpf_link_info**
404 * Returns zero on success. On error, -1 is returned and *errno*
405 * is set appropriately.
409 * Obtain information about eBPF programs associated with the
410 * specified *attach_type* hook.
412 * The *target_fd* must be a valid file descriptor for a kernel
413 * object which depends on the attach type of *attach_bpf_fd*:
415 * **BPF_PROG_TYPE_CGROUP_DEVICE**,
416 * **BPF_PROG_TYPE_CGROUP_SKB**,
417 * **BPF_PROG_TYPE_CGROUP_SOCK**,
418 * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
419 * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
420 * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
421 * **BPF_PROG_TYPE_SOCK_OPS**
423 * Control Group v2 hierarchy with the eBPF controller
424 * enabled. Requires the kernel to be compiled with
425 * **CONFIG_CGROUP_BPF**.
427 * **BPF_PROG_TYPE_FLOW_DISSECTOR**
429 * Network namespace (eg /proc/self/ns/net).
431 * **BPF_PROG_TYPE_LIRC_MODE2**
433 * LIRC device path (eg /dev/lircN). Requires the kernel
434 * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
436 * **BPF_PROG_QUERY** always fetches the number of programs
437 * attached and the *attach_flags* which were used to attach those
438 * programs. Additionally, if *prog_ids* is nonzero and the number
439 * of attached programs is less than *prog_cnt*, populates
440 * *prog_ids* with the eBPF program ids of the programs attached
443 * The following flags may alter the result:
445 * **BPF_F_QUERY_EFFECTIVE**
446 * Only return information regarding programs which are
447 * currently effective at the specified *target_fd*.
450 * Returns zero on success. On error, -1 is returned and *errno*
451 * is set appropriately.
453 * BPF_RAW_TRACEPOINT_OPEN
455 * Attach an eBPF program to a tracepoint *name* to access kernel
456 * internal arguments of the tracepoint in their raw form.
458 * The *prog_fd* must be a valid file descriptor associated with
459 * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
461 * No ABI guarantees are made about the content of tracepoint
462 * arguments exposed to the corresponding eBPF program.
464 * Applying **close**\ (2) to the file descriptor returned by
465 * **BPF_RAW_TRACEPOINT_OPEN** will delete the map (but see NOTES).
468 * A new file descriptor (a nonnegative integer), or -1 if an
469 * error occurred (in which case, *errno* is set appropriately).
473 * Verify and load BPF Type Format (BTF) metadata into the kernel,
474 * returning a new file descriptor associated with the metadata.
475 * BTF is described in more detail at
476 * https://www.kernel.org/doc/html/latest/bpf/btf.html.
478 * The *btf* parameter must point to valid memory providing
479 * *btf_size* bytes of BTF binary metadata.
481 * The returned file descriptor can be passed to other **bpf**\ ()
482 * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
483 * associate the BTF with those objects.
485 * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
486 * parameters to specify a *btf_log_buf*, *btf_log_size* and
487 * *btf_log_level* which allow the kernel to return freeform log
488 * output regarding the BTF verification process.
491 * A new file descriptor (a nonnegative integer), or -1 if an
492 * error occurred (in which case, *errno* is set appropriately).
494 * BPF_BTF_GET_FD_BY_ID
496 * Open a file descriptor for the BPF Type Format (BTF)
497 * corresponding to *btf_id*.
500 * A new file descriptor (a nonnegative integer), or -1 if an
501 * error occurred (in which case, *errno* is set appropriately).
505 * Obtain information about eBPF programs associated with the
506 * target process identified by *pid* and *fd*.
508 * If the *pid* and *fd* are associated with a tracepoint, kprobe
509 * or uprobe perf event, then the *prog_id* and *fd_type* will
510 * be populated with the eBPF program id and file descriptor type
511 * of type **bpf_task_fd_type**. If associated with a kprobe or
512 * uprobe, the *probe_offset* and *probe_addr* will also be
513 * populated. Optionally, if *buf* is provided, then up to
514 * *buf_len* bytes of *buf* will be populated with the name of
515 * the tracepoint, kprobe or uprobe.
517 * The resulting *prog_id* may be introspected in deeper detail
518 * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
521 * Returns zero on success. On error, -1 is returned and *errno*
522 * is set appropriately.
524 * BPF_MAP_LOOKUP_AND_DELETE_ELEM
526 * Look up an element with the given *key* in the map referred to
527 * by the file descriptor *fd*, and if found, delete the element.
529 * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
530 * types, the *flags* argument needs to be set to 0, but for other
531 * map types, it may be specified as:
534 * Look up and delete the value of a spin-locked map
535 * without returning the lock. This must be specified if
536 * the elements contain a spinlock.
538 * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
539 * implement this command as a "pop" operation, deleting the top
540 * element rather than one corresponding to *key*.
541 * The *key* and *key_len* parameters should be zeroed when
542 * issuing this operation for these map types.
544 * This command is only valid for the following map types:
545 * * **BPF_MAP_TYPE_QUEUE**
546 * * **BPF_MAP_TYPE_STACK**
547 * * **BPF_MAP_TYPE_HASH**
548 * * **BPF_MAP_TYPE_PERCPU_HASH**
549 * * **BPF_MAP_TYPE_LRU_HASH**
550 * * **BPF_MAP_TYPE_LRU_PERCPU_HASH**
553 * Returns zero on success. On error, -1 is returned and *errno*
554 * is set appropriately.
558 * Freeze the permissions of the specified map.
560 * Write permissions may be frozen by passing zero *flags*.
561 * Upon success, no future syscall invocations may alter the
562 * map state of *map_fd*. Write operations from eBPF programs
563 * are still possible for a frozen map.
565 * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
568 * Returns zero on success. On error, -1 is returned and *errno*
569 * is set appropriately.
571 * BPF_BTF_GET_NEXT_ID
573 * Fetch the next BPF Type Format (BTF) object currently loaded
576 * Looks for the BTF object with an id greater than *start_id*
577 * and updates *next_id* on success. If no other BTF objects
578 * remain with ids higher than *start_id*, returns -1 and sets
579 * *errno* to **ENOENT**.
582 * Returns zero on success. On error, or when no id remains, -1
583 * is returned and *errno* is set appropriately.
585 * BPF_MAP_LOOKUP_BATCH
587 * Iterate and fetch multiple elements in a map.
589 * Two opaque values are used to manage batch operations,
590 * *in_batch* and *out_batch*. Initially, *in_batch* must be set
591 * to NULL to begin the batched operation. After each subsequent
592 * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
593 * *out_batch* as the *in_batch* for the next operation to
594 * continue iteration from the current point.
596 * The *keys* and *values* are output parameters which must point
597 * to memory large enough to hold *count* items based on the key
598 * and value size of the map *map_fd*. The *keys* buffer must be
599 * of *key_size* * *count*. The *values* buffer must be of
600 * *value_size* * *count*.
602 * The *elem_flags* argument may be specified as one of the
606 * Look up the value of a spin-locked map without
607 * returning the lock. This must be specified if the
608 * elements contain a spinlock.
610 * On success, *count* elements from the map are copied into the
611 * user buffer, with the keys copied into *keys* and the values
612 * copied into the corresponding indices in *values*.
614 * If an error is returned and *errno* is not **EFAULT**, *count*
615 * is set to the number of successfully processed elements.
618 * Returns zero on success. On error, -1 is returned and *errno*
619 * is set appropriately.
621 * May set *errno* to **ENOSPC** to indicate that *keys* or
622 * *values* is too small to dump an entire bucket during
623 * iteration of a hash-based map type.
625 * BPF_MAP_LOOKUP_AND_DELETE_BATCH
627 * Iterate and delete all elements in a map.
629 * This operation has the same behavior as
630 * **BPF_MAP_LOOKUP_BATCH** with two exceptions:
632 * * Every element that is successfully returned is also deleted
633 * from the map. This is at least *count* elements. Note that
634 * *count* is both an input and an output parameter.
635 * * Upon returning with *errno* set to **EFAULT**, up to
636 * *count* elements may be deleted without returning the keys
637 * and values of the deleted elements.
640 * Returns zero on success. On error, -1 is returned and *errno*
641 * is set appropriately.
643 * BPF_MAP_UPDATE_BATCH
645 * Update multiple elements in a map by *key*.
647 * The *keys* and *values* are input parameters which must point
648 * to memory large enough to hold *count* items based on the key
649 * and value size of the map *map_fd*. The *keys* buffer must be
650 * of *key_size* * *count*. The *values* buffer must be of
651 * *value_size* * *count*.
653 * Each element specified in *keys* is sequentially updated to the
654 * value in the corresponding index in *values*. The *in_batch*
655 * and *out_batch* parameters are ignored and should be zeroed.
657 * The *elem_flags* argument should be specified as one of the
661 * Create new elements or update a existing elements.
663 * Create new elements only if they do not exist.
665 * Update existing elements.
667 * Update spin_lock-ed map elements. This must be
668 * specified if the map value contains a spinlock.
670 * On success, *count* elements from the map are updated.
672 * If an error is returned and *errno* is not **EFAULT**, *count*
673 * is set to the number of successfully processed elements.
676 * Returns zero on success. On error, -1 is returned and *errno*
677 * is set appropriately.
679 * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
680 * **E2BIG**. **E2BIG** indicates that the number of elements in
681 * the map reached the *max_entries* limit specified at map
684 * May set *errno* to one of the following error codes under
685 * specific circumstances:
688 * If *flags* specifies **BPF_NOEXIST** and the element
689 * with *key* already exists in the map.
691 * If *flags* specifies **BPF_EXIST** and the element with
692 * *key* does not exist in the map.
694 * BPF_MAP_DELETE_BATCH
696 * Delete multiple elements in a map by *key*.
698 * The *keys* parameter is an input parameter which must point
699 * to memory large enough to hold *count* items based on the key
700 * size of the map *map_fd*, that is, *key_size* * *count*.
702 * Each element specified in *keys* is sequentially deleted. The
703 * *in_batch*, *out_batch*, and *values* parameters are ignored
704 * and should be zeroed.
706 * The *elem_flags* argument may be specified as one of the
710 * Look up the value of a spin-locked map without
711 * returning the lock. This must be specified if the
712 * elements contain a spinlock.
714 * On success, *count* elements from the map are updated.
716 * If an error is returned and *errno* is not **EFAULT**, *count*
717 * is set to the number of successfully processed elements. If
718 * *errno* is **EFAULT**, up to *count* elements may be been
722 * Returns zero on success. On error, -1 is returned and *errno*
723 * is set appropriately.
727 * Attach an eBPF program to a *target_fd* at the specified
728 * *attach_type* hook and return a file descriptor handle for
732 * A new file descriptor (a nonnegative integer), or -1 if an
733 * error occurred (in which case, *errno* is set appropriately).
737 * Update the eBPF program in the specified *link_fd* to
741 * Returns zero on success. On error, -1 is returned and *errno*
742 * is set appropriately.
744 * BPF_LINK_GET_FD_BY_ID
746 * Open a file descriptor for the eBPF Link corresponding to
750 * A new file descriptor (a nonnegative integer), or -1 if an
751 * error occurred (in which case, *errno* is set appropriately).
753 * BPF_LINK_GET_NEXT_ID
755 * Fetch the next eBPF link currently loaded into the kernel.
757 * Looks for the eBPF link with an id greater than *start_id*
758 * and updates *next_id* on success. If no other eBPF links
759 * remain with ids higher than *start_id*, returns -1 and sets
760 * *errno* to **ENOENT**.
763 * Returns zero on success. On error, or when no id remains, -1
764 * is returned and *errno* is set appropriately.
768 * Enable eBPF runtime statistics gathering.
770 * Runtime statistics gathering for the eBPF runtime is disabled
771 * by default to minimize the corresponding performance overhead.
772 * This command enables statistics globally.
774 * Multiple programs may independently enable statistics.
775 * After gathering the desired statistics, eBPF runtime statistics
776 * may be disabled again by calling **close**\ (2) for the file
777 * descriptor returned by this function. Statistics will only be
778 * disabled system-wide when all outstanding file descriptors
779 * returned by prior calls for this subcommand are closed.
782 * A new file descriptor (a nonnegative integer), or -1 if an
783 * error occurred (in which case, *errno* is set appropriately).
787 * Create an iterator on top of the specified *link_fd* (as
788 * previously created using **BPF_LINK_CREATE**) and return a
789 * file descriptor that can be used to trigger the iteration.
791 * If the resulting file descriptor is pinned to the filesystem
792 * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
793 * for that path will trigger the iterator to read kernel state
794 * using the eBPF program attached to *link_fd*.
797 * A new file descriptor (a nonnegative integer), or -1 if an
798 * error occurred (in which case, *errno* is set appropriately).
802 * Forcefully detach the specified *link_fd* from its
803 * corresponding attachment point.
806 * Returns zero on success. On error, -1 is returned and *errno*
807 * is set appropriately.
811 * Bind a map to the lifetime of an eBPF program.
813 * The map identified by *map_fd* is bound to the program
814 * identified by *prog_fd* and only released when *prog_fd* is
815 * released. This may be used in cases where metadata should be
816 * associated with a program which otherwise does not contain any
817 * references to the map (for example, embedded in the eBPF
818 * program instructions).
821 * Returns zero on success. On error, -1 is returned and *errno*
822 * is set appropriately.
825 * eBPF objects (maps and programs) can be shared between processes.
827 * * After **fork**\ (2), the child inherits file descriptors
828 * referring to the same eBPF objects.
829 * * File descriptors referring to eBPF objects can be transferred over
830 * **unix**\ (7) domain sockets.
831 * * File descriptors referring to eBPF objects can be duplicated in the
832 * usual way, using **dup**\ (2) and similar calls.
833 * * File descriptors referring to eBPF objects can be pinned to the
834 * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
836 * An eBPF object is deallocated only after all file descriptors referring
837 * to the object have been closed and no references remain pinned to the
838 * filesystem or attached (for example, bound to a program or device).
845 BPF_MAP_GET_NEXT_KEY,
852 BPF_PROG_RUN = BPF_PROG_TEST_RUN,
853 BPF_PROG_GET_NEXT_ID,
855 BPF_PROG_GET_FD_BY_ID,
856 BPF_MAP_GET_FD_BY_ID,
857 BPF_OBJ_GET_INFO_BY_FD,
859 BPF_RAW_TRACEPOINT_OPEN,
861 BPF_BTF_GET_FD_BY_ID,
863 BPF_MAP_LOOKUP_AND_DELETE_ELEM,
866 BPF_MAP_LOOKUP_BATCH,
867 BPF_MAP_LOOKUP_AND_DELETE_BATCH,
868 BPF_MAP_UPDATE_BATCH,
869 BPF_MAP_DELETE_BATCH,
872 BPF_LINK_GET_FD_BY_ID,
873 BPF_LINK_GET_NEXT_ID,
884 BPF_MAP_TYPE_PROG_ARRAY,
885 BPF_MAP_TYPE_PERF_EVENT_ARRAY,
886 BPF_MAP_TYPE_PERCPU_HASH,
887 BPF_MAP_TYPE_PERCPU_ARRAY,
888 BPF_MAP_TYPE_STACK_TRACE,
889 BPF_MAP_TYPE_CGROUP_ARRAY,
890 BPF_MAP_TYPE_LRU_HASH,
891 BPF_MAP_TYPE_LRU_PERCPU_HASH,
892 BPF_MAP_TYPE_LPM_TRIE,
893 BPF_MAP_TYPE_ARRAY_OF_MAPS,
894 BPF_MAP_TYPE_HASH_OF_MAPS,
896 BPF_MAP_TYPE_SOCKMAP,
899 BPF_MAP_TYPE_SOCKHASH,
900 BPF_MAP_TYPE_CGROUP_STORAGE,
901 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
902 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
905 BPF_MAP_TYPE_SK_STORAGE,
906 BPF_MAP_TYPE_DEVMAP_HASH,
907 BPF_MAP_TYPE_STRUCT_OPS,
908 BPF_MAP_TYPE_RINGBUF,
909 BPF_MAP_TYPE_INODE_STORAGE,
910 BPF_MAP_TYPE_TASK_STORAGE,
911 BPF_MAP_TYPE_BLOOM_FILTER,
914 /* Note that tracing related programs such as
915 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
916 * are not subject to a stable API since kernel internal data
917 * structures can change from release to release and may
918 * therefore break existing tracing BPF programs. Tracing BPF
919 * programs correspond to /a/ specific kernel which is to be
920 * analyzed, and not /a/ specific kernel /and/ all future ones.
923 BPF_PROG_TYPE_UNSPEC,
924 BPF_PROG_TYPE_SOCKET_FILTER,
925 BPF_PROG_TYPE_KPROBE,
926 BPF_PROG_TYPE_SCHED_CLS,
927 BPF_PROG_TYPE_SCHED_ACT,
928 BPF_PROG_TYPE_TRACEPOINT,
930 BPF_PROG_TYPE_PERF_EVENT,
931 BPF_PROG_TYPE_CGROUP_SKB,
932 BPF_PROG_TYPE_CGROUP_SOCK,
933 BPF_PROG_TYPE_LWT_IN,
934 BPF_PROG_TYPE_LWT_OUT,
935 BPF_PROG_TYPE_LWT_XMIT,
936 BPF_PROG_TYPE_SOCK_OPS,
937 BPF_PROG_TYPE_SK_SKB,
938 BPF_PROG_TYPE_CGROUP_DEVICE,
939 BPF_PROG_TYPE_SK_MSG,
940 BPF_PROG_TYPE_RAW_TRACEPOINT,
941 BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
942 BPF_PROG_TYPE_LWT_SEG6LOCAL,
943 BPF_PROG_TYPE_LIRC_MODE2,
944 BPF_PROG_TYPE_SK_REUSEPORT,
945 BPF_PROG_TYPE_FLOW_DISSECTOR,
946 BPF_PROG_TYPE_CGROUP_SYSCTL,
947 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
948 BPF_PROG_TYPE_CGROUP_SOCKOPT,
949 BPF_PROG_TYPE_TRACING,
950 BPF_PROG_TYPE_STRUCT_OPS,
953 BPF_PROG_TYPE_SK_LOOKUP,
954 BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
957 enum bpf_attach_type {
958 BPF_CGROUP_INET_INGRESS,
959 BPF_CGROUP_INET_EGRESS,
960 BPF_CGROUP_INET_SOCK_CREATE,
962 BPF_SK_SKB_STREAM_PARSER,
963 BPF_SK_SKB_STREAM_VERDICT,
966 BPF_CGROUP_INET4_BIND,
967 BPF_CGROUP_INET6_BIND,
968 BPF_CGROUP_INET4_CONNECT,
969 BPF_CGROUP_INET6_CONNECT,
970 BPF_CGROUP_INET4_POST_BIND,
971 BPF_CGROUP_INET6_POST_BIND,
972 BPF_CGROUP_UDP4_SENDMSG,
973 BPF_CGROUP_UDP6_SENDMSG,
977 BPF_CGROUP_UDP4_RECVMSG,
978 BPF_CGROUP_UDP6_RECVMSG,
979 BPF_CGROUP_GETSOCKOPT,
980 BPF_CGROUP_SETSOCKOPT,
987 BPF_CGROUP_INET4_GETPEERNAME,
988 BPF_CGROUP_INET6_GETPEERNAME,
989 BPF_CGROUP_INET4_GETSOCKNAME,
990 BPF_CGROUP_INET6_GETSOCKNAME,
992 BPF_CGROUP_INET_SOCK_RELEASE,
997 BPF_SK_REUSEPORT_SELECT,
998 BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
1000 BPF_TRACE_KPROBE_MULTI,
1002 __MAX_BPF_ATTACH_TYPE
1005 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
1007 enum bpf_link_type {
1008 BPF_LINK_TYPE_UNSPEC = 0,
1009 BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
1010 BPF_LINK_TYPE_TRACING = 2,
1011 BPF_LINK_TYPE_CGROUP = 3,
1012 BPF_LINK_TYPE_ITER = 4,
1013 BPF_LINK_TYPE_NETNS = 5,
1014 BPF_LINK_TYPE_XDP = 6,
1015 BPF_LINK_TYPE_PERF_EVENT = 7,
1016 BPF_LINK_TYPE_KPROBE_MULTI = 8,
1017 BPF_LINK_TYPE_STRUCT_OPS = 9,
1022 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
1024 * NONE(default): No further bpf programs allowed in the subtree.
1026 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
1027 * the program in this cgroup yields to sub-cgroup program.
1029 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
1030 * that cgroup program gets run in addition to the program in this cgroup.
1032 * Only one program is allowed to be attached to a cgroup with
1033 * NONE or BPF_F_ALLOW_OVERRIDE flag.
1034 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
1035 * release old program and attach the new one. Attach flags has to match.
1037 * Multiple programs are allowed to be attached to a cgroup with
1038 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
1039 * (those that were attached first, run first)
1040 * The programs of sub-cgroup are executed first, then programs of
1041 * this cgroup and then programs of parent cgroup.
1042 * When children program makes decision (like picking TCP CA or sock bind)
1043 * parent program has a chance to override it.
1045 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
1046 * programs for a cgroup. Though it's possible to replace an old program at
1047 * any position by also specifying BPF_F_REPLACE flag and position itself in
1048 * replace_bpf_fd attribute. Old program at this position will be released.
1050 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
1051 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
1053 * cgrp1 (MULTI progs A, B) ->
1054 * cgrp2 (OVERRIDE prog C) ->
1055 * cgrp3 (MULTI prog D) ->
1056 * cgrp4 (OVERRIDE prog E) ->
1057 * cgrp5 (NONE prog F)
1058 * the event in cgrp5 triggers execution of F,D,A,B in that order.
1059 * if prog F is detached, the execution is E,D,A,B
1060 * if prog F and D are detached, the execution is E,A,B
1061 * if prog F, E and D are detached, the execution is C,A,B
1063 * All eligible programs are executed regardless of return code from
1066 #define BPF_F_ALLOW_OVERRIDE (1U << 0)
1067 #define BPF_F_ALLOW_MULTI (1U << 1)
1068 #define BPF_F_REPLACE (1U << 2)
1070 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
1071 * verifier will perform strict alignment checking as if the kernel
1072 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
1073 * and NET_IP_ALIGN defined to 2.
1075 #define BPF_F_STRICT_ALIGNMENT (1U << 0)
1077 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
1078 * verifier will allow any alignment whatsoever. On platforms
1079 * with strict alignment requirements for loads ands stores (such
1080 * as sparc and mips) the verifier validates that all loads and
1081 * stores provably follow this requirement. This flag turns that
1082 * checking and enforcement off.
1084 * It is mostly used for testing when we want to validate the
1085 * context and memory access aspects of the verifier, but because
1086 * of an unaligned access the alignment check would trigger before
1087 * the one we are interested in.
1089 #define BPF_F_ANY_ALIGNMENT (1U << 1)
1091 /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
1092 * Verifier does sub-register def/use analysis and identifies instructions whose
1093 * def only matters for low 32-bit, high 32-bit is never referenced later
1094 * through implicit zero extension. Therefore verifier notifies JIT back-ends
1095 * that it is safe to ignore clearing high 32-bit for these instructions. This
1096 * saves some back-ends a lot of code-gen. However such optimization is not
1097 * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
1098 * hence hasn't used verifier's analysis result. But, we really want to have a
1099 * way to be able to verify the correctness of the described optimization on
1100 * x86_64 on which testsuites are frequently exercised.
1102 * So, this flag is introduced. Once it is set, verifier will randomize high
1103 * 32-bit for those instructions who has been identified as safe to ignore them.
1104 * Then, if verifier is not doing correct analysis, such randomization will
1105 * regress tests to expose bugs.
1107 #define BPF_F_TEST_RND_HI32 (1U << 2)
1109 /* The verifier internal test flag. Behavior is undefined */
1110 #define BPF_F_TEST_STATE_FREQ (1U << 3)
1112 /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
1113 * restrict map and helper usage for such programs. Sleepable BPF programs can
1114 * only be attached to hooks where kernel execution context allows sleeping.
1115 * Such programs are allowed to use helpers that may sleep like
1116 * bpf_copy_from_user().
1118 #define BPF_F_SLEEPABLE (1U << 4)
1120 /* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
1121 * fully support xdp frags.
1123 #define BPF_F_XDP_HAS_FRAGS (1U << 5)
1125 /* link_create.kprobe_multi.flags used in LINK_CREATE command for
1126 * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
1128 #define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
1130 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have
1131 * the following extensions:
1133 * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
1134 * insn[0].imm: map fd or fd_idx
1138 * ldimm64 rewrite: address of map
1139 * verifier type: CONST_PTR_TO_MAP
1141 #define BPF_PSEUDO_MAP_FD 1
1142 #define BPF_PSEUDO_MAP_IDX 5
1144 /* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
1145 * insn[0].imm: map fd or fd_idx
1146 * insn[1].imm: offset into value
1149 * ldimm64 rewrite: address of map[0]+offset
1150 * verifier type: PTR_TO_MAP_VALUE
1152 #define BPF_PSEUDO_MAP_VALUE 2
1153 #define BPF_PSEUDO_MAP_IDX_VALUE 6
1155 /* insn[0].src_reg: BPF_PSEUDO_BTF_ID
1156 * insn[0].imm: kernel btd id of VAR
1160 * ldimm64 rewrite: address of the kernel variable
1161 * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
1164 #define BPF_PSEUDO_BTF_ID 3
1165 /* insn[0].src_reg: BPF_PSEUDO_FUNC
1166 * insn[0].imm: insn offset to the func
1170 * ldimm64 rewrite: address of the function
1171 * verifier type: PTR_TO_FUNC.
1173 #define BPF_PSEUDO_FUNC 4
1175 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
1176 * offset to another bpf function
1178 #define BPF_PSEUDO_CALL 1
1179 /* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
1180 * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
1182 #define BPF_PSEUDO_KFUNC_CALL 2
1184 /* flags for BPF_MAP_UPDATE_ELEM command */
1186 BPF_ANY = 0, /* create new element or update existing */
1187 BPF_NOEXIST = 1, /* create new element if it didn't exist */
1188 BPF_EXIST = 2, /* update existing element */
1189 BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
1192 /* flags for BPF_MAP_CREATE command */
1194 BPF_F_NO_PREALLOC = (1U << 0),
1195 /* Instead of having one common LRU list in the
1196 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
1197 * which can scale and perform better.
1198 * Note, the LRU nodes (including free nodes) cannot be moved
1199 * across different LRU lists.
1201 BPF_F_NO_COMMON_LRU = (1U << 1),
1202 /* Specify numa node during map creation */
1203 BPF_F_NUMA_NODE = (1U << 2),
1205 /* Flags for accessing BPF object from syscall side. */
1206 BPF_F_RDONLY = (1U << 3),
1207 BPF_F_WRONLY = (1U << 4),
1209 /* Flag for stack_map, store build_id+offset instead of pointer */
1210 BPF_F_STACK_BUILD_ID = (1U << 5),
1212 /* Zero-initialize hash function seed. This should only be used for testing. */
1213 BPF_F_ZERO_SEED = (1U << 6),
1215 /* Flags for accessing BPF object from program side. */
1216 BPF_F_RDONLY_PROG = (1U << 7),
1217 BPF_F_WRONLY_PROG = (1U << 8),
1219 /* Clone map from listener for newly accepted socket */
1220 BPF_F_CLONE = (1U << 9),
1222 /* Enable memory-mapping BPF map */
1223 BPF_F_MMAPABLE = (1U << 10),
1225 /* Share perf_event among processes */
1226 BPF_F_PRESERVE_ELEMS = (1U << 11),
1228 /* Create a map that is suitable to be an inner map with dynamic max entries */
1229 BPF_F_INNER_MAP = (1U << 12),
1232 /* Flags for BPF_PROG_QUERY. */
1234 /* Query effective (directly attached + inherited from ancestor cgroups)
1235 * programs that will be executed for events within a cgroup.
1236 * attach_flags with this flag are always returned 0.
1238 #define BPF_F_QUERY_EFFECTIVE (1U << 0)
1240 /* Flags for BPF_PROG_TEST_RUN */
1242 /* If set, run the test on the cpu specified by bpf_attr.test.cpu */
1243 #define BPF_F_TEST_RUN_ON_CPU (1U << 0)
1244 /* If set, XDP frames will be transmitted after processing */
1245 #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
1247 /* type for BPF_ENABLE_STATS */
1248 enum bpf_stats_type {
1249 /* enabled run_time_ns and run_cnt */
1250 BPF_STATS_RUN_TIME = 0,
1253 enum bpf_stack_build_id_status {
1254 /* user space need an empty entry to identify end of a trace */
1255 BPF_STACK_BUILD_ID_EMPTY = 0,
1256 /* with valid build_id and offset */
1257 BPF_STACK_BUILD_ID_VALID = 1,
1258 /* couldn't get build_id, fallback to ip */
1259 BPF_STACK_BUILD_ID_IP = 2,
1262 #define BPF_BUILD_ID_SIZE 20
1263 struct bpf_stack_build_id {
1265 unsigned char build_id[BPF_BUILD_ID_SIZE];
1272 #define BPF_OBJ_NAME_LEN 16U
1275 struct { /* anonymous struct used by BPF_MAP_CREATE command */
1276 __u32 map_type; /* one of enum bpf_map_type */
1277 __u32 key_size; /* size of key in bytes */
1278 __u32 value_size; /* size of value in bytes */
1279 __u32 max_entries; /* max number of entries in a map */
1280 __u32 map_flags; /* BPF_MAP_CREATE related
1281 * flags defined above.
1283 __u32 inner_map_fd; /* fd pointing to the inner map */
1284 __u32 numa_node; /* numa node (effective only if
1285 * BPF_F_NUMA_NODE is set).
1287 char map_name[BPF_OBJ_NAME_LEN];
1288 __u32 map_ifindex; /* ifindex of netdev to create on */
1289 __u32 btf_fd; /* fd pointing to a BTF type data */
1290 __u32 btf_key_type_id; /* BTF type_id of the key */
1291 __u32 btf_value_type_id; /* BTF type_id of the value */
1292 __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
1293 * struct stored as the
1296 /* Any per-map-type extra fields
1298 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
1299 * number of hash functions (if 0, the bloom filter will default
1300 * to using 5 hash functions).
1305 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
1309 __aligned_u64 value;
1310 __aligned_u64 next_key;
1315 struct { /* struct used by BPF_MAP_*_BATCH commands */
1316 __aligned_u64 in_batch; /* start batch,
1317 * NULL to start from beginning
1319 __aligned_u64 out_batch; /* output: next start batch */
1321 __aligned_u64 values;
1322 __u32 count; /* input/output:
1323 * input: # of key/value
1325 * output: # of filled elements
1332 struct { /* anonymous struct used by BPF_PROG_LOAD command */
1333 __u32 prog_type; /* one of enum bpf_prog_type */
1335 __aligned_u64 insns;
1336 __aligned_u64 license;
1337 __u32 log_level; /* verbosity level of verifier */
1338 __u32 log_size; /* size of user buffer */
1339 __aligned_u64 log_buf; /* user supplied buffer */
1340 __u32 kern_version; /* not used */
1342 char prog_name[BPF_OBJ_NAME_LEN];
1343 __u32 prog_ifindex; /* ifindex of netdev to prep for */
1344 /* For some prog types expected attach type must be known at
1345 * load time to verify attach type specific parts of prog
1346 * (context accesses, allowed helpers, etc).
1348 __u32 expected_attach_type;
1349 __u32 prog_btf_fd; /* fd pointing to BTF type data */
1350 __u32 func_info_rec_size; /* userspace bpf_func_info size */
1351 __aligned_u64 func_info; /* func info */
1352 __u32 func_info_cnt; /* number of bpf_func_info records */
1353 __u32 line_info_rec_size; /* userspace bpf_line_info size */
1354 __aligned_u64 line_info; /* line info */
1355 __u32 line_info_cnt; /* number of bpf_line_info records */
1356 __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
1358 /* valid prog_fd to attach to bpf prog */
1359 __u32 attach_prog_fd;
1360 /* or valid module BTF object fd or 0 to attach to vmlinux */
1361 __u32 attach_btf_obj_fd;
1363 __u32 core_relo_cnt; /* number of bpf_core_relo */
1364 __aligned_u64 fd_array; /* array of FDs */
1365 __aligned_u64 core_relos;
1366 __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
1369 struct { /* anonymous struct used by BPF_OBJ_* commands */
1370 __aligned_u64 pathname;
1375 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
1376 __u32 target_fd; /* container object to attach to */
1377 __u32 attach_bpf_fd; /* eBPF program to attach */
1380 __u32 replace_bpf_fd; /* previously attached eBPF
1381 * program to replace if
1382 * BPF_F_REPLACE is used
1386 struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
1389 __u32 data_size_in; /* input: len of data_in */
1390 __u32 data_size_out; /* input/output: len of data_out
1391 * returns ENOSPC if data_out
1394 __aligned_u64 data_in;
1395 __aligned_u64 data_out;
1398 __u32 ctx_size_in; /* input: len of ctx_in */
1399 __u32 ctx_size_out; /* input/output: len of ctx_out
1400 * returns ENOSPC if ctx_out
1403 __aligned_u64 ctx_in;
1404 __aligned_u64 ctx_out;
1410 struct { /* anonymous struct used by BPF_*_GET_*_ID */
1422 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
1428 struct { /* anonymous struct used by BPF_PROG_QUERY command */
1429 __u32 target_fd; /* container object to query */
1433 __aligned_u64 prog_ids;
1435 /* output: per-program attach_flags.
1436 * not allowed to be set during effective query.
1438 __aligned_u64 prog_attach_flags;
1441 struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
1446 struct { /* anonymous struct for BPF_BTF_LOAD */
1448 __aligned_u64 btf_log_buf;
1451 __u32 btf_log_level;
1455 __u32 pid; /* input: pid */
1456 __u32 fd; /* input: fd */
1457 __u32 flags; /* input: flags */
1458 __u32 buf_len; /* input/output: buf len */
1459 __aligned_u64 buf; /* input/output:
1460 * tp_name for tracepoint
1462 * filename for uprobe
1464 __u32 prog_id; /* output: prod_id */
1465 __u32 fd_type; /* output: BPF_FD_TYPE_* */
1466 __u64 probe_offset; /* output: probe_offset */
1467 __u64 probe_addr; /* output: probe_addr */
1470 struct { /* struct used by BPF_LINK_CREATE command */
1471 __u32 prog_fd; /* eBPF program to attach */
1473 __u32 target_fd; /* object to attach to */
1474 __u32 target_ifindex; /* target ifindex */
1476 __u32 attach_type; /* attach type */
1477 __u32 flags; /* extra flags */
1479 __u32 target_btf_id; /* btf_id of target to attach to */
1481 __aligned_u64 iter_info; /* extra bpf_iter_link_info */
1482 __u32 iter_info_len; /* iter_info length */
1485 /* black box user-provided value passed through
1486 * to BPF program at the execution time and
1487 * accessible through bpf_get_attach_cookie() BPF helper
1495 __aligned_u64 addrs;
1496 __aligned_u64 cookies;
1499 /* this is overlaid with the target_btf_id above. */
1500 __u32 target_btf_id;
1501 /* black box user-provided value passed through
1502 * to BPF program at the execution time and
1503 * accessible through bpf_get_attach_cookie() BPF helper
1510 struct { /* struct used by BPF_LINK_UPDATE command */
1511 __u32 link_fd; /* link fd */
1512 /* new program fd to update link with */
1514 __u32 flags; /* extra flags */
1515 /* expected link's program fd; is specified only if
1516 * BPF_F_REPLACE flag is set in flags */
1524 struct { /* struct used by BPF_ENABLE_STATS command */
1528 struct { /* struct used by BPF_ITER_CREATE command */
1533 struct { /* struct used by BPF_PROG_BIND_MAP command */
1536 __u32 flags; /* extra flags */
1539 } __attribute__((aligned(8)));
1541 /* The description below is an attempt at providing documentation to eBPF
1542 * developers about the multiple available eBPF helper functions. It can be
1543 * parsed and used to produce a manual page. The workflow is the following,
1544 * and requires the rst2man utility:
1546 * $ ./scripts/bpf_doc.py \
1547 * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
1548 * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
1549 * $ man /tmp/bpf-helpers.7
1551 * Note that in order to produce this external documentation, some RST
1552 * formatting is used in the descriptions to get "bold" and "italics" in
1553 * manual pages. Also note that the few trailing white spaces are
1554 * intentional, removing them would break paragraphs for rst2man.
1556 * Start of BPF helper function descriptions:
1558 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
1560 * Perform a lookup in *map* for an entry associated to *key*.
1562 * Map value associated to *key*, or **NULL** if no entry was
1565 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
1567 * Add or update the value of the entry associated to *key* in
1568 * *map* with *value*. *flags* is one of:
1571 * The entry for *key* must not exist in the map.
1573 * The entry for *key* must already exist in the map.
1575 * No condition on the existence of the entry for *key*.
1577 * Flag value **BPF_NOEXIST** cannot be used for maps of types
1578 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
1579 * elements always exist), the helper would return an error.
1581 * 0 on success, or a negative error in case of failure.
1583 * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
1585 * Delete entry with *key* from *map*.
1587 * 0 on success, or a negative error in case of failure.
1589 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
1591 * For tracing programs, safely attempt to read *size* bytes from
1592 * kernel space address *unsafe_ptr* and store the data in *dst*.
1594 * Generally, use **bpf_probe_read_user**\ () or
1595 * **bpf_probe_read_kernel**\ () instead.
1597 * 0 on success, or a negative error in case of failure.
1599 * u64 bpf_ktime_get_ns(void)
1601 * Return the time elapsed since system boot, in nanoseconds.
1602 * Does not include time the system was suspended.
1603 * See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
1607 * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
1609 * This helper is a "printk()-like" facility for debugging. It
1610 * prints a message defined by format *fmt* (of size *fmt_size*)
1611 * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
1612 * available. It can take up to three additional **u64**
1613 * arguments (as an eBPF helpers, the total number of arguments is
1616 * Each time the helper is called, it appends a line to the trace.
1617 * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
1618 * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
1619 * The format of the trace is customizable, and the exact output
1620 * one will get depends on the options set in
1621 * *\/sys/kernel/debug/tracing/trace_options* (see also the
1622 * *README* file under the same directory). However, it usually
1623 * defaults to something like:
1627 * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg>
1631 * * ``telnet`` is the name of the current task.
1632 * * ``470`` is the PID of the current task.
1633 * * ``001`` is the CPU number on which the task is
1635 * * In ``.N..``, each character refers to a set of
1636 * options (whether irqs are enabled, scheduling
1637 * options, whether hard/softirqs are running, level of
1638 * preempt_disabled respectively). **N** means that
1639 * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
1641 * * ``419421.045894`` is a timestamp.
1642 * * ``0x00000001`` is a fake value used by BPF for the
1643 * instruction pointer register.
1644 * * ``<formatted msg>`` is the message formatted with
1647 * The conversion specifiers supported by *fmt* are similar, but
1648 * more limited than for printk(). They are **%d**, **%i**,
1649 * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
1650 * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
1651 * of field, padding with zeroes, etc.) is available, and the
1652 * helper will return **-EINVAL** (but print nothing) if it
1653 * encounters an unknown specifier.
1655 * Also, note that **bpf_trace_printk**\ () is slow, and should
1656 * only be used for debugging purposes. For this reason, a notice
1657 * block (spanning several lines) is printed to kernel logs and
1658 * states that the helper should not be used "for production use"
1659 * the first time this helper is used (or more precisely, when
1660 * **trace_printk**\ () buffers are allocated). For passing values
1661 * to user space, perf events should be preferred.
1663 * The number of bytes written to the buffer, or a negative error
1664 * in case of failure.
1666 * u32 bpf_get_prandom_u32(void)
1668 * Get a pseudo-random number.
1670 * From a security point of view, this helper uses its own
1671 * pseudo-random internal state, and cannot be used to infer the
1672 * seed of other random functions in the kernel. However, it is
1673 * essential to note that the generator used by the helper is not
1674 * cryptographically secure.
1676 * A random 32-bit unsigned value.
1678 * u32 bpf_get_smp_processor_id(void)
1680 * Get the SMP (symmetric multiprocessing) processor id. Note that
1681 * all programs run with migration disabled, which means that the
1682 * SMP processor id is stable during all the execution of the
1685 * The SMP id of the processor running the program.
1687 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
1689 * Store *len* bytes from address *from* into the packet
1690 * associated to *skb*, at *offset*. *flags* are a combination of
1691 * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
1692 * checksum for the packet after storing the bytes) and
1693 * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
1694 * **->swhash** and *skb*\ **->l4hash** to 0).
1696 * A call to this helper is susceptible to change the underlying
1697 * packet buffer. Therefore, at load time, all checks on pointers
1698 * previously done by the verifier are invalidated and must be
1699 * performed again, if the helper is used in combination with
1700 * direct packet access.
1702 * 0 on success, or a negative error in case of failure.
1704 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
1706 * Recompute the layer 3 (e.g. IP) checksum for the packet
1707 * associated to *skb*. Computation is incremental, so the helper
1708 * must know the former value of the header field that was
1709 * modified (*from*), the new value of this field (*to*), and the
1710 * number of bytes (2 or 4) for this field, stored in *size*.
1711 * Alternatively, it is possible to store the difference between
1712 * the previous and the new values of the header field in *to*, by
1713 * setting *from* and *size* to 0. For both methods, *offset*
1714 * indicates the location of the IP checksum within the packet.
1716 * This helper works in combination with **bpf_csum_diff**\ (),
1717 * which does not update the checksum in-place, but offers more
1718 * flexibility and can handle sizes larger than 2 or 4 for the
1719 * checksum to update.
1721 * A call to this helper is susceptible to change the underlying
1722 * packet buffer. Therefore, at load time, all checks on pointers
1723 * previously done by the verifier are invalidated and must be
1724 * performed again, if the helper is used in combination with
1725 * direct packet access.
1727 * 0 on success, or a negative error in case of failure.
1729 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
1731 * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
1732 * packet associated to *skb*. Computation is incremental, so the
1733 * helper must know the former value of the header field that was
1734 * modified (*from*), the new value of this field (*to*), and the
1735 * number of bytes (2 or 4) for this field, stored on the lowest
1736 * four bits of *flags*. Alternatively, it is possible to store
1737 * the difference between the previous and the new values of the
1738 * header field in *to*, by setting *from* and the four lowest
1739 * bits of *flags* to 0. For both methods, *offset* indicates the
1740 * location of the IP checksum within the packet. In addition to
1741 * the size of the field, *flags* can be added (bitwise OR) actual
1742 * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
1743 * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
1744 * for updates resulting in a null checksum the value is set to
1745 * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
1746 * the checksum is to be computed against a pseudo-header.
1748 * This helper works in combination with **bpf_csum_diff**\ (),
1749 * which does not update the checksum in-place, but offers more
1750 * flexibility and can handle sizes larger than 2 or 4 for the
1751 * checksum to update.
1753 * A call to this helper is susceptible to change the underlying
1754 * packet buffer. Therefore, at load time, all checks on pointers
1755 * previously done by the verifier are invalidated and must be
1756 * performed again, if the helper is used in combination with
1757 * direct packet access.
1759 * 0 on success, or a negative error in case of failure.
1761 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
1763 * This special helper is used to trigger a "tail call", or in
1764 * other words, to jump into another eBPF program. The same stack
1765 * frame is used (but values on stack and in registers for the
1766 * caller are not accessible to the callee). This mechanism allows
1767 * for program chaining, either for raising the maximum number of
1768 * available eBPF instructions, or to execute given programs in
1769 * conditional blocks. For security reasons, there is an upper
1770 * limit to the number of successive tail calls that can be
1773 * Upon call of this helper, the program attempts to jump into a
1774 * program referenced at index *index* in *prog_array_map*, a
1775 * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
1776 * *ctx*, a pointer to the context.
1778 * If the call succeeds, the kernel immediately runs the first
1779 * instruction of the new program. This is not a function call,
1780 * and it never returns to the previous program. If the call
1781 * fails, then the helper has no effect, and the caller continues
1782 * to run its subsequent instructions. A call can fail if the
1783 * destination program for the jump does not exist (i.e. *index*
1784 * is superior to the number of entries in *prog_array_map*), or
1785 * if the maximum number of tail calls has been reached for this
1786 * chain of programs. This limit is defined in the kernel by the
1787 * macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
1788 * which is currently set to 33.
1790 * 0 on success, or a negative error in case of failure.
1792 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
1794 * Clone and redirect the packet associated to *skb* to another
1795 * net device of index *ifindex*. Both ingress and egress
1796 * interfaces can be used for redirection. The **BPF_F_INGRESS**
1797 * value in *flags* is used to make the distinction (ingress path
1798 * is selected if the flag is present, egress path otherwise).
1799 * This is the only flag supported for now.
1801 * In comparison with **bpf_redirect**\ () helper,
1802 * **bpf_clone_redirect**\ () has the associated cost of
1803 * duplicating the packet buffer, but this can be executed out of
1804 * the eBPF program. Conversely, **bpf_redirect**\ () is more
1805 * efficient, but it is handled through an action code where the
1806 * redirection happens only after the eBPF program has returned.
1808 * A call to this helper is susceptible to change the underlying
1809 * packet buffer. Therefore, at load time, all checks on pointers
1810 * previously done by the verifier are invalidated and must be
1811 * performed again, if the helper is used in combination with
1812 * direct packet access.
1814 * 0 on success, or a negative error in case of failure.
1816 * u64 bpf_get_current_pid_tgid(void)
1818 * Get the current pid and tgid.
1820 * A 64-bit integer containing the current tgid and pid, and
1822 * *current_task*\ **->tgid << 32 \|**
1823 * *current_task*\ **->pid**.
1825 * u64 bpf_get_current_uid_gid(void)
1827 * Get the current uid and gid.
1829 * A 64-bit integer containing the current GID and UID, and
1830 * created as such: *current_gid* **<< 32 \|** *current_uid*.
1832 * long bpf_get_current_comm(void *buf, u32 size_of_buf)
1834 * Copy the **comm** attribute of the current task into *buf* of
1835 * *size_of_buf*. The **comm** attribute contains the name of
1836 * the executable (excluding the path) for the current task. The
1837 * *size_of_buf* must be strictly positive. On success, the
1838 * helper makes sure that the *buf* is NUL-terminated. On failure,
1839 * it is filled with zeroes.
1841 * 0 on success, or a negative error in case of failure.
1843 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
1845 * Retrieve the classid for the current task, i.e. for the net_cls
1846 * cgroup to which *skb* belongs.
1848 * This helper can be used on TC egress path, but not on ingress.
1850 * The net_cls cgroup provides an interface to tag network packets
1851 * based on a user-provided identifier for all traffic coming from
1852 * the tasks belonging to the related cgroup. See also the related
1853 * kernel documentation, available from the Linux sources in file
1854 * *Documentation/admin-guide/cgroup-v1/net_cls.rst*.
1856 * The Linux kernel has two versions for cgroups: there are
1857 * cgroups v1 and cgroups v2. Both are available to users, who can
1858 * use a mixture of them, but note that the net_cls cgroup is for
1859 * cgroup v1 only. This makes it incompatible with BPF programs
1860 * run on cgroups, which is a cgroup-v2-only feature (a socket can
1861 * only hold data for one version of cgroups at a time).
1863 * This helper is only available is the kernel was compiled with
1864 * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
1865 * "**y**" or to "**m**".
1867 * The classid, or 0 for the default unconfigured classid.
1869 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
1871 * Push a *vlan_tci* (VLAN tag control information) of protocol
1872 * *vlan_proto* to the packet associated to *skb*, then update
1873 * the checksum. Note that if *vlan_proto* is different from
1874 * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
1875 * be **ETH_P_8021Q**.
1877 * A call to this helper is susceptible to change the underlying
1878 * packet buffer. Therefore, at load time, all checks on pointers
1879 * previously done by the verifier are invalidated and must be
1880 * performed again, if the helper is used in combination with
1881 * direct packet access.
1883 * 0 on success, or a negative error in case of failure.
1885 * long bpf_skb_vlan_pop(struct sk_buff *skb)
1887 * Pop a VLAN header from the packet associated to *skb*.
1889 * A call to this helper is susceptible to change the underlying
1890 * packet buffer. Therefore, at load time, all checks on pointers
1891 * previously done by the verifier are invalidated and must be
1892 * performed again, if the helper is used in combination with
1893 * direct packet access.
1895 * 0 on success, or a negative error in case of failure.
1897 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
1899 * Get tunnel metadata. This helper takes a pointer *key* to an
1900 * empty **struct bpf_tunnel_key** of **size**, that will be
1901 * filled with tunnel metadata for the packet associated to *skb*.
1902 * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
1903 * indicates that the tunnel is based on IPv6 protocol instead of
1906 * The **struct bpf_tunnel_key** is an object that generalizes the
1907 * principal parameters used by various tunneling protocols into a
1908 * single struct. This way, it can be used to easily make a
1909 * decision based on the contents of the encapsulation header,
1910 * "summarized" in this struct. In particular, it holds the IP
1911 * address of the remote end (IPv4 or IPv6, depending on the case)
1912 * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
1913 * this struct exposes the *key*\ **->tunnel_id**, which is
1914 * generally mapped to a VNI (Virtual Network Identifier), making
1915 * it programmable together with the **bpf_skb_set_tunnel_key**\
1918 * Let's imagine that the following code is part of a program
1919 * attached to the TC ingress interface, on one end of a GRE
1920 * tunnel, and is supposed to filter out all messages coming from
1921 * remote ends with IPv4 address other than 10.0.0.1:
1926 * struct bpf_tunnel_key key = {};
1928 * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
1930 * return TC_ACT_SHOT; // drop packet
1932 * if (key.remote_ipv4 != 0x0a000001)
1933 * return TC_ACT_SHOT; // drop packet
1935 * return TC_ACT_OK; // accept packet
1937 * This interface can also be used with all encapsulation devices
1938 * that can operate in "collect metadata" mode: instead of having
1939 * one network device per specific configuration, the "collect
1940 * metadata" mode only requires a single device where the
1941 * configuration can be extracted from this helper.
1943 * This can be used together with various tunnels such as VXLan,
1944 * Geneve, GRE or IP in IP (IPIP).
1946 * 0 on success, or a negative error in case of failure.
1948 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
1950 * Populate tunnel metadata for packet associated to *skb.* The
1951 * tunnel metadata is set to the contents of *key*, of *size*. The
1952 * *flags* can be set to a combination of the following values:
1954 * **BPF_F_TUNINFO_IPV6**
1955 * Indicate that the tunnel is based on IPv6 protocol
1957 * **BPF_F_ZERO_CSUM_TX**
1958 * For IPv4 packets, add a flag to tunnel metadata
1959 * indicating that checksum computation should be skipped
1960 * and checksum set to zeroes.
1961 * **BPF_F_DONT_FRAGMENT**
1962 * Add a flag to tunnel metadata indicating that the
1963 * packet should not be fragmented.
1964 * **BPF_F_SEQ_NUMBER**
1965 * Add a flag to tunnel metadata indicating that a
1966 * sequence number should be added to tunnel header before
1967 * sending the packet. This flag was added for GRE
1968 * encapsulation, but might be used with other protocols
1969 * as well in the future.
1971 * Here is a typical usage on the transmit path:
1975 * struct bpf_tunnel_key key;
1977 * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
1978 * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
1980 * See also the description of the **bpf_skb_get_tunnel_key**\ ()
1981 * helper for additional information.
1983 * 0 on success, or a negative error in case of failure.
1985 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
1987 * Read the value of a perf event counter. This helper relies on a
1988 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
1989 * the perf event counter is selected when *map* is updated with
1990 * perf event file descriptors. The *map* is an array whose size
1991 * is the number of available CPUs, and each cell contains a value
1992 * relative to one CPU. The value to retrieve is indicated by
1993 * *flags*, that contains the index of the CPU to look up, masked
1994 * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1995 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1996 * current CPU should be retrieved.
1998 * Note that before Linux 4.13, only hardware perf event can be
2001 * Also, be aware that the newer helper
2002 * **bpf_perf_event_read_value**\ () is recommended over
2003 * **bpf_perf_event_read**\ () in general. The latter has some ABI
2004 * quirks where error and counter value are used as a return code
2005 * (which is wrong to do since ranges may overlap). This issue is
2006 * fixed with **bpf_perf_event_read_value**\ (), which at the same
2007 * time provides more features over the **bpf_perf_event_read**\
2008 * () interface. Please refer to the description of
2009 * **bpf_perf_event_read_value**\ () for details.
2011 * The value of the perf event counter read from the map, or a
2012 * negative error code in case of failure.
2014 * long bpf_redirect(u32 ifindex, u64 flags)
2016 * Redirect the packet to another net device of index *ifindex*.
2017 * This helper is somewhat similar to **bpf_clone_redirect**\
2018 * (), except that the packet is not cloned, which provides
2019 * increased performance.
2021 * Except for XDP, both ingress and egress interfaces can be used
2022 * for redirection. The **BPF_F_INGRESS** value in *flags* is used
2023 * to make the distinction (ingress path is selected if the flag
2024 * is present, egress path otherwise). Currently, XDP only
2025 * supports redirection to the egress interface, and accepts no
2028 * The same effect can also be attained with the more generic
2029 * **bpf_redirect_map**\ (), which uses a BPF map to store the
2030 * redirect target instead of providing it directly to the helper.
2032 * For XDP, the helper returns **XDP_REDIRECT** on success or
2033 * **XDP_ABORTED** on error. For other program types, the values
2034 * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
2037 * u32 bpf_get_route_realm(struct sk_buff *skb)
2039 * Retrieve the realm or the route, that is to say the
2040 * **tclassid** field of the destination for the *skb*. The
2041 * identifier retrieved is a user-provided tag, similar to the
2042 * one used with the net_cls cgroup (see description for
2043 * **bpf_get_cgroup_classid**\ () helper), but here this tag is
2044 * held by a route (a destination entry), not by a task.
2046 * Retrieving this identifier works with the clsact TC egress hook
2047 * (see also **tc-bpf(8)**), or alternatively on conventional
2048 * classful egress qdiscs, but not on TC ingress path. In case of
2049 * clsact TC egress hook, this has the advantage that, internally,
2050 * the destination entry has not been dropped yet in the transmit
2051 * path. Therefore, the destination entry does not need to be
2052 * artificially held via **netif_keep_dst**\ () for a classful
2053 * qdisc until the *skb* is freed.
2055 * This helper is available only if the kernel was compiled with
2056 * **CONFIG_IP_ROUTE_CLASSID** configuration option.
2058 * The realm of the route for the packet associated to *skb*, or 0
2059 * if none was found.
2061 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2063 * Write raw *data* blob into a special BPF perf event held by
2064 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2065 * event must have the following attributes: **PERF_SAMPLE_RAW**
2066 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2067 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2069 * The *flags* are used to indicate the index in *map* for which
2070 * the value must be put, masked with **BPF_F_INDEX_MASK**.
2071 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2072 * to indicate that the index of the current CPU core should be
2075 * The value to write, of *size*, is passed through eBPF stack and
2076 * pointed by *data*.
2078 * The context of the program *ctx* needs also be passed to the
2081 * On user space, a program willing to read the values needs to
2082 * call **perf_event_open**\ () on the perf event (either for
2083 * one or for all CPUs) and to store the file descriptor into the
2084 * *map*. This must be done before the eBPF program can send data
2085 * into it. An example is available in file
2086 * *samples/bpf/trace_output_user.c* in the Linux kernel source
2087 * tree (the eBPF program counterpart is in
2088 * *samples/bpf/trace_output_kern.c*).
2090 * **bpf_perf_event_output**\ () achieves better performance
2091 * than **bpf_trace_printk**\ () for sharing data with user
2092 * space, and is much better suitable for streaming data from eBPF
2095 * Note that this helper is not restricted to tracing use cases
2096 * and can be used with programs attached to TC or XDP as well,
2097 * where it allows for passing data to user space listeners. Data
2100 * * Only custom structs,
2101 * * Only the packet payload, or
2102 * * A combination of both.
2104 * 0 on success, or a negative error in case of failure.
2106 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
2108 * This helper was provided as an easy way to load data from a
2109 * packet. It can be used to load *len* bytes from *offset* from
2110 * the packet associated to *skb*, into the buffer pointed by
2113 * Since Linux 4.7, usage of this helper has mostly been replaced
2114 * by "direct packet access", enabling packet data to be
2115 * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
2116 * pointing respectively to the first byte of packet data and to
2117 * the byte after the last byte of packet data. However, it
2118 * remains useful if one wishes to read large quantities of data
2119 * at once from a packet into the eBPF stack.
2121 * 0 on success, or a negative error in case of failure.
2123 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
2125 * Walk a user or a kernel stack and return its id. To achieve
2126 * this, the helper needs *ctx*, which is a pointer to the context
2127 * on which the tracing program is executed, and a pointer to a
2128 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
2130 * The last argument, *flags*, holds the number of stack frames to
2131 * skip (from 0 to 255), masked with
2132 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
2133 * a combination of the following flags:
2135 * **BPF_F_USER_STACK**
2136 * Collect a user space stack instead of a kernel stack.
2137 * **BPF_F_FAST_STACK_CMP**
2138 * Compare stacks by hash only.
2139 * **BPF_F_REUSE_STACKID**
2140 * If two different stacks hash into the same *stackid*,
2141 * discard the old one.
2143 * The stack id retrieved is a 32 bit long integer handle which
2144 * can be further combined with other data (including other stack
2145 * ids) and used as a key into maps. This can be useful for
2146 * generating a variety of graphs (such as flame graphs or off-cpu
2149 * For walking a stack, this helper is an improvement over
2150 * **bpf_probe_read**\ (), which can be used with unrolled loops
2151 * but is not efficient and consumes a lot of eBPF instructions.
2152 * Instead, **bpf_get_stackid**\ () can collect up to
2153 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
2154 * this limit can be controlled with the **sysctl** program, and
2155 * that it should be manually increased in order to profile long
2156 * user stacks (such as stacks for Java programs). To do so, use:
2160 * # sysctl kernel.perf_event_max_stack=<new value>
2162 * The positive or null stack id on success, or a negative error
2163 * in case of failure.
2165 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
2167 * Compute a checksum difference, from the raw buffer pointed by
2168 * *from*, of length *from_size* (that must be a multiple of 4),
2169 * towards the raw buffer pointed by *to*, of size *to_size*
2170 * (same remark). An optional *seed* can be added to the value
2171 * (this can be cascaded, the seed may come from a previous call
2174 * This is flexible enough to be used in several ways:
2176 * * With *from_size* == 0, *to_size* > 0 and *seed* set to
2177 * checksum, it can be used when pushing new data.
2178 * * With *from_size* > 0, *to_size* == 0 and *seed* set to
2179 * checksum, it can be used when removing data from a packet.
2180 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
2181 * can be used to compute a diff. Note that *from_size* and
2182 * *to_size* do not need to be equal.
2184 * This helper can be used in combination with
2185 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
2186 * which one can feed in the difference computed with
2187 * **bpf_csum_diff**\ ().
2189 * The checksum result, or a negative error code in case of
2192 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2194 * Retrieve tunnel options metadata for the packet associated to
2195 * *skb*, and store the raw tunnel option data to the buffer *opt*
2198 * This helper can be used with encapsulation devices that can
2199 * operate in "collect metadata" mode (please refer to the related
2200 * note in the description of **bpf_skb_get_tunnel_key**\ () for
2201 * more details). A particular example where this can be used is
2202 * in combination with the Geneve encapsulation protocol, where it
2203 * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
2204 * and retrieving arbitrary TLVs (Type-Length-Value headers) from
2205 * the eBPF program. This allows for full customization of these
2208 * The size of the option data retrieved.
2210 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2212 * Set tunnel options metadata for the packet associated to *skb*
2213 * to the option data contained in the raw buffer *opt* of *size*.
2215 * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
2216 * helper for additional information.
2218 * 0 on success, or a negative error in case of failure.
2220 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
2222 * Change the protocol of the *skb* to *proto*. Currently
2223 * supported are transition from IPv4 to IPv6, and from IPv6 to
2224 * IPv4. The helper takes care of the groundwork for the
2225 * transition, including resizing the socket buffer. The eBPF
2226 * program is expected to fill the new headers, if any, via
2227 * **skb_store_bytes**\ () and to recompute the checksums with
2228 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
2229 * (). The main case for this helper is to perform NAT64
2230 * operations out of an eBPF program.
2232 * Internally, the GSO type is marked as dodgy so that headers are
2233 * checked and segments are recalculated by the GSO/GRO engine.
2234 * The size for GSO target is adapted as well.
2236 * All values for *flags* are reserved for future usage, and must
2239 * A call to this helper is susceptible to change the underlying
2240 * packet buffer. Therefore, at load time, all checks on pointers
2241 * previously done by the verifier are invalidated and must be
2242 * performed again, if the helper is used in combination with
2243 * direct packet access.
2245 * 0 on success, or a negative error in case of failure.
2247 * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
2249 * Change the packet type for the packet associated to *skb*. This
2250 * comes down to setting *skb*\ **->pkt_type** to *type*, except
2251 * the eBPF program does not have a write access to *skb*\
2252 * **->pkt_type** beside this helper. Using a helper here allows
2253 * for graceful handling of errors.
2255 * The major use case is to change incoming *skb*s to
2256 * **PACKET_HOST** in a programmatic way instead of having to
2257 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
2260 * Note that *type* only allows certain values. At this time, they
2265 * **PACKET_BROADCAST**
2266 * Send packet to all.
2267 * **PACKET_MULTICAST**
2268 * Send packet to group.
2269 * **PACKET_OTHERHOST**
2270 * Send packet to someone else.
2272 * 0 on success, or a negative error in case of failure.
2274 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
2276 * Check whether *skb* is a descendant of the cgroup2 held by
2277 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2279 * The return value depends on the result of the test, and can be:
2281 * * 0, if the *skb* failed the cgroup2 descendant test.
2282 * * 1, if the *skb* succeeded the cgroup2 descendant test.
2283 * * A negative error code, if an error occurred.
2285 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
2287 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
2288 * not set, in particular if the hash was cleared due to mangling,
2289 * recompute this hash. Later accesses to the hash can be done
2290 * directly with *skb*\ **->hash**.
2292 * Calling **bpf_set_hash_invalid**\ (), changing a packet
2293 * prototype with **bpf_skb_change_proto**\ (), or calling
2294 * **bpf_skb_store_bytes**\ () with the
2295 * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear
2296 * the hash and to trigger a new computation for the next call to
2297 * **bpf_get_hash_recalc**\ ().
2301 * u64 bpf_get_current_task(void)
2303 * Get the current task.
2305 * A pointer to the current task struct.
2307 * long bpf_probe_write_user(void *dst, const void *src, u32 len)
2309 * Attempt in a safe way to write *len* bytes from the buffer
2310 * *src* to *dst* in memory. It only works for threads that are in
2311 * user context, and *dst* must be a valid user space address.
2313 * This helper should not be used to implement any kind of
2314 * security mechanism because of TOC-TOU attacks, but rather to
2315 * debug, divert, and manipulate execution of semi-cooperative
2318 * Keep in mind that this feature is meant for experiments, and it
2319 * has a risk of crashing the system and running programs.
2320 * Therefore, when an eBPF program using this helper is attached,
2321 * a warning including PID and process name is printed to kernel
2324 * 0 on success, or a negative error in case of failure.
2326 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
2328 * Check whether the probe is being run is the context of a given
2329 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
2330 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2332 * The return value depends on the result of the test, and can be:
2334 * * 1, if current task belongs to the cgroup2.
2335 * * 0, if current task does not belong to the cgroup2.
2336 * * A negative error code, if an error occurred.
2338 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
2340 * Resize (trim or grow) the packet associated to *skb* to the
2341 * new *len*. The *flags* are reserved for future usage, and must
2344 * The basic idea is that the helper performs the needed work to
2345 * change the size of the packet, then the eBPF program rewrites
2346 * the rest via helpers like **bpf_skb_store_bytes**\ (),
2347 * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
2348 * and others. This helper is a slow path utility intended for
2349 * replies with control messages. And because it is targeted for
2350 * slow path, the helper itself can afford to be slow: it
2351 * implicitly linearizes, unclones and drops offloads from the
2354 * A call to this helper is susceptible to change the underlying
2355 * packet buffer. Therefore, at load time, all checks on pointers
2356 * previously done by the verifier are invalidated and must be
2357 * performed again, if the helper is used in combination with
2358 * direct packet access.
2360 * 0 on success, or a negative error in case of failure.
2362 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
2364 * Pull in non-linear data in case the *skb* is non-linear and not
2365 * all of *len* are part of the linear section. Make *len* bytes
2366 * from *skb* readable and writable. If a zero value is passed for
2367 * *len*, then all bytes in the linear part of *skb* will be made
2368 * readable and writable.
2370 * This helper is only needed for reading and writing with direct
2373 * For direct packet access, testing that offsets to access
2374 * are within packet boundaries (test on *skb*\ **->data_end**) is
2375 * susceptible to fail if offsets are invalid, or if the requested
2376 * data is in non-linear parts of the *skb*. On failure the
2377 * program can just bail out, or in the case of a non-linear
2378 * buffer, use a helper to make the data available. The
2379 * **bpf_skb_load_bytes**\ () helper is a first solution to access
2380 * the data. Another one consists in using **bpf_skb_pull_data**
2381 * to pull in once the non-linear parts, then retesting and
2382 * eventually access the data.
2384 * At the same time, this also makes sure the *skb* is uncloned,
2385 * which is a necessary condition for direct write. As this needs
2386 * to be an invariant for the write part only, the verifier
2387 * detects writes and adds a prologue that is calling
2388 * **bpf_skb_pull_data()** to effectively unclone the *skb* from
2389 * the very beginning in case it is indeed cloned.
2391 * A call to this helper is susceptible to change the underlying
2392 * packet buffer. Therefore, at load time, all checks on pointers
2393 * previously done by the verifier are invalidated and must be
2394 * performed again, if the helper is used in combination with
2395 * direct packet access.
2397 * 0 on success, or a negative error in case of failure.
2399 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
2401 * Add the checksum *csum* into *skb*\ **->csum** in case the
2402 * driver has supplied a checksum for the entire packet into that
2403 * field. Return an error otherwise. This helper is intended to be
2404 * used in combination with **bpf_csum_diff**\ (), in particular
2405 * when the checksum needs to be updated after data has been
2406 * written into the packet through direct packet access.
2408 * The checksum on success, or a negative error code in case of
2411 * void bpf_set_hash_invalid(struct sk_buff *skb)
2413 * Invalidate the current *skb*\ **->hash**. It can be used after
2414 * mangling on headers through direct packet access, in order to
2415 * indicate that the hash is outdated and to trigger a
2416 * recalculation the next time the kernel tries to access this
2417 * hash or when the **bpf_get_hash_recalc**\ () helper is called.
2421 * long bpf_get_numa_node_id(void)
2423 * Return the id of the current NUMA node. The primary use case
2424 * for this helper is the selection of sockets for the local NUMA
2425 * node, when the program is attached to sockets using the
2426 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
2427 * but the helper is also available to other eBPF program types,
2428 * similarly to **bpf_get_smp_processor_id**\ ().
2430 * The id of current NUMA node.
2432 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
2434 * Grows headroom of packet associated to *skb* and adjusts the
2435 * offset of the MAC header accordingly, adding *len* bytes of
2436 * space. It automatically extends and reallocates memory as
2439 * This helper can be used on a layer 3 *skb* to push a MAC header
2440 * for redirection into a layer 2 device.
2442 * All values for *flags* are reserved for future usage, and must
2445 * A call to this helper is susceptible to change the underlying
2446 * packet buffer. Therefore, at load time, all checks on pointers
2447 * previously done by the verifier are invalidated and must be
2448 * performed again, if the helper is used in combination with
2449 * direct packet access.
2451 * 0 on success, or a negative error in case of failure.
2453 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
2455 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
2456 * it is possible to use a negative value for *delta*. This helper
2457 * can be used to prepare the packet for pushing or popping
2460 * A call to this helper is susceptible to change the underlying
2461 * packet buffer. Therefore, at load time, all checks on pointers
2462 * previously done by the verifier are invalidated and must be
2463 * performed again, if the helper is used in combination with
2464 * direct packet access.
2466 * 0 on success, or a negative error in case of failure.
2468 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
2470 * Copy a NUL terminated string from an unsafe kernel address
2471 * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
2474 * Generally, use **bpf_probe_read_user_str**\ () or
2475 * **bpf_probe_read_kernel_str**\ () instead.
2477 * On success, the strictly positive length of the string,
2478 * including the trailing NUL character. On error, a negative
2481 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
2483 * If the **struct sk_buff** pointed by *skb* has a known socket,
2484 * retrieve the cookie (generated by the kernel) of this socket.
2485 * If no cookie has been set yet, generate a new cookie. Once
2486 * generated, the socket cookie remains stable for the life of the
2487 * socket. This helper can be useful for monitoring per socket
2488 * networking traffic statistics as it provides a global socket
2489 * identifier that can be assumed unique.
2491 * A 8-byte long unique number on success, or 0 if the socket
2492 * field is missing inside *skb*.
2494 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
2496 * Equivalent to bpf_get_socket_cookie() helper that accepts
2497 * *skb*, but gets socket from **struct bpf_sock_addr** context.
2499 * A 8-byte long unique number.
2501 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
2503 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2504 * *skb*, but gets socket from **struct bpf_sock_ops** context.
2506 * A 8-byte long unique number.
2508 * u64 bpf_get_socket_cookie(struct sock *sk)
2510 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2511 * *sk*, but gets socket from a BTF **struct sock**. This helper
2512 * also works for sleepable programs.
2514 * A 8-byte long unique number or 0 if *sk* is NULL.
2516 * u32 bpf_get_socket_uid(struct sk_buff *skb)
2518 * Get the owner UID of the socked associated to *skb*.
2520 * The owner UID of the socket associated to *skb*. If the socket
2521 * is **NULL**, or if it is not a full socket (i.e. if it is a
2522 * time-wait or a request socket instead), **overflowuid** value
2523 * is returned (note that **overflowuid** might also be the actual
2524 * UID value for the socket).
2526 * long bpf_set_hash(struct sk_buff *skb, u32 hash)
2528 * Set the full hash for *skb* (set the field *skb*\ **->hash**)
2533 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
2535 * Emulate a call to **setsockopt()** on the socket associated to
2536 * *bpf_socket*, which must be a full socket. The *level* at
2537 * which the option resides and the name *optname* of the option
2538 * must be specified, see **setsockopt(2)** for more information.
2539 * The option value of length *optlen* is pointed by *optval*.
2541 * *bpf_socket* should be one of the following:
2543 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
2544 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
2545 * and **BPF_CGROUP_INET6_CONNECT**.
2547 * This helper actually implements a subset of **setsockopt()**.
2548 * It supports the following *level*\ s:
2550 * * **SOL_SOCKET**, which supports the following *optname*\ s:
2551 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
2552 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
2553 * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
2554 * * **IPPROTO_TCP**, which supports the following *optname*\ s:
2555 * **TCP_CONGESTION**, **TCP_BPF_IW**,
2556 * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
2557 * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
2558 * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
2559 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
2560 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
2562 * 0 on success, or a negative error in case of failure.
2564 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
2566 * Grow or shrink the room for data in the packet associated to
2567 * *skb* by *len_diff*, and according to the selected *mode*.
2569 * By default, the helper will reset any offloaded checksum
2570 * indicator of the skb to CHECKSUM_NONE. This can be avoided
2571 * by the following flag:
2573 * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
2574 * checksum data of the skb to CHECKSUM_NONE.
2576 * There are two supported modes at this time:
2578 * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
2579 * (room space is added or removed below the layer 2 header).
2581 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
2582 * (room space is added or removed below the layer 3 header).
2584 * The following flags are supported at this time:
2586 * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
2587 * Adjusting mss in this way is not allowed for datagrams.
2589 * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
2590 * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
2591 * Any new space is reserved to hold a tunnel header.
2592 * Configure skb offsets and other fields accordingly.
2594 * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
2595 * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
2596 * Use with ENCAP_L3 flags to further specify the tunnel type.
2598 * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
2599 * Use with ENCAP_L3/L4 flags to further specify the tunnel
2600 * type; *len* is the length of the inner MAC header.
2602 * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
2603 * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
2604 * L2 type as Ethernet.
2606 * A call to this helper is susceptible to change the underlying
2607 * packet buffer. Therefore, at load time, all checks on pointers
2608 * previously done by the verifier are invalidated and must be
2609 * performed again, if the helper is used in combination with
2610 * direct packet access.
2612 * 0 on success, or a negative error in case of failure.
2614 * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
2616 * Redirect the packet to the endpoint referenced by *map* at
2617 * index *key*. Depending on its type, this *map* can contain
2618 * references to net devices (for forwarding packets through other
2619 * ports), or to CPUs (for redirecting XDP frames to another CPU;
2620 * but this is only implemented for native XDP (with driver
2621 * support) as of this writing).
2623 * The lower two bits of *flags* are used as the return code if
2624 * the map lookup fails. This is so that the return value can be
2625 * one of the XDP program return codes up to **XDP_TX**, as chosen
2626 * by the caller. The higher bits of *flags* can be set to
2627 * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
2629 * With BPF_F_BROADCAST the packet will be broadcasted to all the
2630 * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress
2631 * interface will be excluded when do broadcasting.
2633 * See also **bpf_redirect**\ (), which only supports redirecting
2634 * to an ifindex, but doesn't require a map to do so.
2636 * **XDP_REDIRECT** on success, or the value of the two lower bits
2637 * of the *flags* argument on error.
2639 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
2641 * Redirect the packet to the socket referenced by *map* (of type
2642 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
2643 * egress interfaces can be used for redirection. The
2644 * **BPF_F_INGRESS** value in *flags* is used to make the
2645 * distinction (ingress path is selected if the flag is present,
2646 * egress path otherwise). This is the only flag supported for now.
2648 * **SK_PASS** on success, or **SK_DROP** on error.
2650 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
2652 * Add an entry to, or update a *map* referencing sockets. The
2653 * *skops* is used as a new value for the entry associated to
2654 * *key*. *flags* is one of:
2657 * The entry for *key* must not exist in the map.
2659 * The entry for *key* must already exist in the map.
2661 * No condition on the existence of the entry for *key*.
2663 * If the *map* has eBPF programs (parser and verdict), those will
2664 * be inherited by the socket being added. If the socket is
2665 * already attached to eBPF programs, this results in an error.
2667 * 0 on success, or a negative error in case of failure.
2669 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
2671 * Adjust the address pointed by *xdp_md*\ **->data_meta** by
2672 * *delta* (which can be positive or negative). Note that this
2673 * operation modifies the address stored in *xdp_md*\ **->data**,
2674 * so the latter must be loaded only after the helper has been
2677 * The use of *xdp_md*\ **->data_meta** is optional and programs
2678 * are not required to use it. The rationale is that when the
2679 * packet is processed with XDP (e.g. as DoS filter), it is
2680 * possible to push further meta data along with it before passing
2681 * to the stack, and to give the guarantee that an ingress eBPF
2682 * program attached as a TC classifier on the same device can pick
2683 * this up for further post-processing. Since TC works with socket
2684 * buffers, it remains possible to set from XDP the **mark** or
2685 * **priority** pointers, or other pointers for the socket buffer.
2686 * Having this scratch space generic and programmable allows for
2687 * more flexibility as the user is free to store whatever meta
2690 * A call to this helper is susceptible to change the underlying
2691 * packet buffer. Therefore, at load time, all checks on pointers
2692 * previously done by the verifier are invalidated and must be
2693 * performed again, if the helper is used in combination with
2694 * direct packet access.
2696 * 0 on success, or a negative error in case of failure.
2698 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
2700 * Read the value of a perf event counter, and store it into *buf*
2701 * of size *buf_size*. This helper relies on a *map* of type
2702 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
2703 * counter is selected when *map* is updated with perf event file
2704 * descriptors. The *map* is an array whose size is the number of
2705 * available CPUs, and each cell contains a value relative to one
2706 * CPU. The value to retrieve is indicated by *flags*, that
2707 * contains the index of the CPU to look up, masked with
2708 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
2709 * **BPF_F_CURRENT_CPU** to indicate that the value for the
2710 * current CPU should be retrieved.
2712 * This helper behaves in a way close to
2713 * **bpf_perf_event_read**\ () helper, save that instead of
2714 * just returning the value observed, it fills the *buf*
2715 * structure. This allows for additional data to be retrieved: in
2716 * particular, the enabled and running times (in *buf*\
2717 * **->enabled** and *buf*\ **->running**, respectively) are
2718 * copied. In general, **bpf_perf_event_read_value**\ () is
2719 * recommended over **bpf_perf_event_read**\ (), which has some
2720 * ABI issues and provides fewer functionalities.
2722 * These values are interesting, because hardware PMU (Performance
2723 * Monitoring Unit) counters are limited resources. When there are
2724 * more PMU based perf events opened than available counters,
2725 * kernel will multiplex these events so each event gets certain
2726 * percentage (but not all) of the PMU time. In case that
2727 * multiplexing happens, the number of samples or counter value
2728 * will not reflect the case compared to when no multiplexing
2729 * occurs. This makes comparison between different runs difficult.
2730 * Typically, the counter value should be normalized before
2731 * comparing to other experiments. The usual normalization is done
2736 * normalized_counter = counter * t_enabled / t_running
2738 * Where t_enabled is the time enabled for event and t_running is
2739 * the time running for event since last normalization. The
2740 * enabled and running times are accumulated since the perf event
2741 * open. To achieve scaling factor between two invocations of an
2742 * eBPF program, users can use CPU id as the key (which is
2743 * typical for perf array usage model) to remember the previous
2744 * value and do the calculation inside the eBPF program.
2746 * 0 on success, or a negative error in case of failure.
2748 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
2750 * For en eBPF program attached to a perf event, retrieve the
2751 * value of the event counter associated to *ctx* and store it in
2752 * the structure pointed by *buf* and of size *buf_size*. Enabled
2753 * and running times are also stored in the structure (see
2754 * description of helper **bpf_perf_event_read_value**\ () for
2757 * 0 on success, or a negative error in case of failure.
2759 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
2761 * Emulate a call to **getsockopt()** on the socket associated to
2762 * *bpf_socket*, which must be a full socket. The *level* at
2763 * which the option resides and the name *optname* of the option
2764 * must be specified, see **getsockopt(2)** for more information.
2765 * The retrieved value is stored in the structure pointed by
2766 * *opval* and of length *optlen*.
2768 * *bpf_socket* should be one of the following:
2770 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
2771 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
2772 * and **BPF_CGROUP_INET6_CONNECT**.
2774 * This helper actually implements a subset of **getsockopt()**.
2775 * It supports the following *level*\ s:
2777 * * **IPPROTO_TCP**, which supports *optname*
2778 * **TCP_CONGESTION**.
2779 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
2780 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
2782 * 0 on success, or a negative error in case of failure.
2784 * long bpf_override_return(struct pt_regs *regs, u64 rc)
2786 * Used for error injection, this helper uses kprobes to override
2787 * the return value of the probed function, and to set it to *rc*.
2788 * The first argument is the context *regs* on which the kprobe
2791 * This helper works by setting the PC (program counter)
2792 * to an override function which is run in place of the original
2793 * probed function. This means the probed function is not run at
2794 * all. The replacement function just returns with the required
2797 * This helper has security implications, and thus is subject to
2798 * restrictions. It is only available if the kernel was compiled
2799 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
2800 * option, and in this case it only works on functions tagged with
2801 * **ALLOW_ERROR_INJECTION** in the kernel code.
2803 * Also, the helper is only available for the architectures having
2804 * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
2805 * x86 architecture is the only one to support this feature.
2809 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
2811 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
2812 * for the full TCP socket associated to *bpf_sock_ops* to
2815 * The primary use of this field is to determine if there should
2816 * be calls to eBPF programs of type
2817 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
2818 * code. A program of the same type can change its value, per
2819 * connection and as necessary, when the connection is
2820 * established. This field is directly accessible for reading, but
2821 * this helper must be used for updates in order to return an
2822 * error if an eBPF program tries to set a callback that is not
2823 * supported in the current kernel.
2825 * *argval* is a flag array which can combine these flags:
2827 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
2828 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
2829 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
2830 * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
2832 * Therefore, this function can be used to clear a callback flag by
2833 * setting the appropriate bit to zero. e.g. to disable the RTO
2836 * **bpf_sock_ops_cb_flags_set(bpf_sock,**
2837 * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
2839 * Here are some examples of where one could call such eBPF
2843 * * When a packet is retransmitted.
2844 * * When the connection terminates.
2845 * * When a packet is sent.
2846 * * When a packet is received.
2848 * Code **-EINVAL** if the socket is not a full TCP socket;
2849 * otherwise, a positive number containing the bits that could not
2850 * be set is returned (which comes down to 0 if all bits were set
2853 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
2855 * This helper is used in programs implementing policies at the
2856 * socket level. If the message *msg* is allowed to pass (i.e. if
2857 * the verdict eBPF program returns **SK_PASS**), redirect it to
2858 * the socket referenced by *map* (of type
2859 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
2860 * egress interfaces can be used for redirection. The
2861 * **BPF_F_INGRESS** value in *flags* is used to make the
2862 * distinction (ingress path is selected if the flag is present,
2863 * egress path otherwise). This is the only flag supported for now.
2865 * **SK_PASS** on success, or **SK_DROP** on error.
2867 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
2869 * For socket policies, apply the verdict of the eBPF program to
2870 * the next *bytes* (number of bytes) of message *msg*.
2872 * For example, this helper can be used in the following cases:
2874 * * A single **sendmsg**\ () or **sendfile**\ () system call
2875 * contains multiple logical messages that the eBPF program is
2876 * supposed to read and for which it should apply a verdict.
2877 * * An eBPF program only cares to read the first *bytes* of a
2878 * *msg*. If the message has a large payload, then setting up
2879 * and calling the eBPF program repeatedly for all bytes, even
2880 * though the verdict is already known, would create unnecessary
2883 * When called from within an eBPF program, the helper sets a
2884 * counter internal to the BPF infrastructure, that is used to
2885 * apply the last verdict to the next *bytes*. If *bytes* is
2886 * smaller than the current data being processed from a
2887 * **sendmsg**\ () or **sendfile**\ () system call, the first
2888 * *bytes* will be sent and the eBPF program will be re-run with
2889 * the pointer for start of data pointing to byte number *bytes*
2890 * **+ 1**. If *bytes* is larger than the current data being
2891 * processed, then the eBPF verdict will be applied to multiple
2892 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
2895 * Note that if a socket closes with the internal counter holding
2896 * a non-zero value, this is not a problem because data is not
2897 * being buffered for *bytes* and is sent as it is received.
2901 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
2903 * For socket policies, prevent the execution of the verdict eBPF
2904 * program for message *msg* until *bytes* (byte number) have been
2907 * This can be used when one needs a specific number of bytes
2908 * before a verdict can be assigned, even if the data spans
2909 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
2910 * case would be a user calling **sendmsg**\ () repeatedly with
2911 * 1-byte long message segments. Obviously, this is bad for
2912 * performance, but it is still valid. If the eBPF program needs
2913 * *bytes* bytes to validate a header, this helper can be used to
2914 * prevent the eBPF program to be called again until *bytes* have
2919 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
2921 * For socket policies, pull in non-linear data from user space
2922 * for *msg* and set pointers *msg*\ **->data** and *msg*\
2923 * **->data_end** to *start* and *end* bytes offsets into *msg*,
2926 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
2927 * *msg* it can only parse data that the (**data**, **data_end**)
2928 * pointers have already consumed. For **sendmsg**\ () hooks this
2929 * is likely the first scatterlist element. But for calls relying
2930 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
2931 * be the range (**0**, **0**) because the data is shared with
2932 * user space and by default the objective is to avoid allowing
2933 * user space to modify data while (or after) eBPF verdict is
2934 * being decided. This helper can be used to pull in data and to
2935 * set the start and end pointer to given values. Data will be
2936 * copied if necessary (i.e. if data was not linear and if start
2937 * and end pointers do not point to the same chunk).
2939 * A call to this helper is susceptible to change the underlying
2940 * packet buffer. Therefore, at load time, all checks on pointers
2941 * previously done by the verifier are invalidated and must be
2942 * performed again, if the helper is used in combination with
2943 * direct packet access.
2945 * All values for *flags* are reserved for future usage, and must
2948 * 0 on success, or a negative error in case of failure.
2950 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
2952 * Bind the socket associated to *ctx* to the address pointed by
2953 * *addr*, of length *addr_len*. This allows for making outgoing
2954 * connection from the desired IP address, which can be useful for
2955 * example when all processes inside a cgroup should use one
2956 * single IP address on a host that has multiple IP configured.
2958 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
2959 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
2960 * **AF_INET6**). It's advised to pass zero port (**sin_port**
2961 * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
2962 * behavior and lets the kernel efficiently pick up an unused
2963 * port as long as 4-tuple is unique. Passing non-zero port might
2964 * lead to degraded performance.
2966 * 0 on success, or a negative error in case of failure.
2968 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
2970 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
2971 * possible to both shrink and grow the packet tail.
2972 * Shrink done via *delta* being a negative integer.
2974 * A call to this helper is susceptible to change the underlying
2975 * packet buffer. Therefore, at load time, all checks on pointers
2976 * previously done by the verifier are invalidated and must be
2977 * performed again, if the helper is used in combination with
2978 * direct packet access.
2980 * 0 on success, or a negative error in case of failure.
2982 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
2984 * Retrieve the XFRM state (IP transform framework, see also
2985 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
2987 * The retrieved value is stored in the **struct bpf_xfrm_state**
2988 * pointed by *xfrm_state* and of length *size*.
2990 * All values for *flags* are reserved for future usage, and must
2993 * This helper is available only if the kernel was compiled with
2994 * **CONFIG_XFRM** configuration option.
2996 * 0 on success, or a negative error in case of failure.
2998 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
3000 * Return a user or a kernel stack in bpf program provided buffer.
3001 * To achieve this, the helper needs *ctx*, which is a pointer
3002 * to the context on which the tracing program is executed.
3003 * To store the stacktrace, the bpf program provides *buf* with
3004 * a nonnegative *size*.
3006 * The last argument, *flags*, holds the number of stack frames to
3007 * skip (from 0 to 255), masked with
3008 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
3009 * the following flags:
3011 * **BPF_F_USER_STACK**
3012 * Collect a user space stack instead of a kernel stack.
3013 * **BPF_F_USER_BUILD_ID**
3014 * Collect buildid+offset instead of ips for user stack,
3015 * only valid if **BPF_F_USER_STACK** is also specified.
3017 * **bpf_get_stack**\ () can collect up to
3018 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
3019 * to sufficient large buffer size. Note that
3020 * this limit can be controlled with the **sysctl** program, and
3021 * that it should be manually increased in order to profile long
3022 * user stacks (such as stacks for Java programs). To do so, use:
3026 * # sysctl kernel.perf_event_max_stack=<new value>
3028 * The non-negative copied *buf* length equal to or less than
3029 * *size* on success, or a negative error in case of failure.
3031 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
3033 * This helper is similar to **bpf_skb_load_bytes**\ () in that
3034 * it provides an easy way to load *len* bytes from *offset*
3035 * from the packet associated to *skb*, into the buffer pointed
3036 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
3037 * a fifth argument *start_header* exists in order to select a
3038 * base offset to start from. *start_header* can be one of:
3040 * **BPF_HDR_START_MAC**
3041 * Base offset to load data from is *skb*'s mac header.
3042 * **BPF_HDR_START_NET**
3043 * Base offset to load data from is *skb*'s network header.
3045 * In general, "direct packet access" is the preferred method to
3046 * access packet data, however, this helper is in particular useful
3047 * in socket filters where *skb*\ **->data** does not always point
3048 * to the start of the mac header and where "direct packet access"
3051 * 0 on success, or a negative error in case of failure.
3053 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
3055 * Do FIB lookup in kernel tables using parameters in *params*.
3056 * If lookup is successful and result shows packet is to be
3057 * forwarded, the neighbor tables are searched for the nexthop.
3058 * If successful (ie., FIB lookup shows forwarding and nexthop
3059 * is resolved), the nexthop address is returned in ipv4_dst
3060 * or ipv6_dst based on family, smac is set to mac address of
3061 * egress device, dmac is set to nexthop mac address, rt_metric
3062 * is set to metric from route (IPv4/IPv6 only), and ifindex
3063 * is set to the device index of the nexthop from the FIB lookup.
3065 * *plen* argument is the size of the passed in struct.
3066 * *flags* argument can be a combination of one or more of the
3069 * **BPF_FIB_LOOKUP_DIRECT**
3070 * Do a direct table lookup vs full lookup using FIB
3072 * **BPF_FIB_LOOKUP_OUTPUT**
3073 * Perform lookup from an egress perspective (default is
3076 * *ctx* is either **struct xdp_md** for XDP programs or
3077 * **struct sk_buff** tc cls_act programs.
3079 * * < 0 if any input argument is invalid
3080 * * 0 on success (packet is forwarded, nexthop neighbor exists)
3081 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
3082 * packet is not forwarded or needs assist from full stack
3084 * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
3085 * was exceeded and output params->mtu_result contains the MTU.
3087 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
3089 * Add an entry to, or update a sockhash *map* referencing sockets.
3090 * The *skops* is used as a new value for the entry associated to
3091 * *key*. *flags* is one of:
3094 * The entry for *key* must not exist in the map.
3096 * The entry for *key* must already exist in the map.
3098 * No condition on the existence of the entry for *key*.
3100 * If the *map* has eBPF programs (parser and verdict), those will
3101 * be inherited by the socket being added. If the socket is
3102 * already attached to eBPF programs, this results in an error.
3104 * 0 on success, or a negative error in case of failure.
3106 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
3108 * This helper is used in programs implementing policies at the
3109 * socket level. If the message *msg* is allowed to pass (i.e. if
3110 * the verdict eBPF program returns **SK_PASS**), redirect it to
3111 * the socket referenced by *map* (of type
3112 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3113 * egress interfaces can be used for redirection. The
3114 * **BPF_F_INGRESS** value in *flags* is used to make the
3115 * distinction (ingress path is selected if the flag is present,
3116 * egress path otherwise). This is the only flag supported for now.
3118 * **SK_PASS** on success, or **SK_DROP** on error.
3120 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
3122 * This helper is used in programs implementing policies at the
3123 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
3124 * if the verdict eBPF program returns **SK_PASS**), redirect it
3125 * to the socket referenced by *map* (of type
3126 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3127 * egress interfaces can be used for redirection. The
3128 * **BPF_F_INGRESS** value in *flags* is used to make the
3129 * distinction (ingress path is selected if the flag is present,
3130 * egress otherwise). This is the only flag supported for now.
3132 * **SK_PASS** on success, or **SK_DROP** on error.
3134 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
3136 * Encapsulate the packet associated to *skb* within a Layer 3
3137 * protocol header. This header is provided in the buffer at
3138 * address *hdr*, with *len* its size in bytes. *type* indicates
3139 * the protocol of the header and can be one of:
3141 * **BPF_LWT_ENCAP_SEG6**
3142 * IPv6 encapsulation with Segment Routing Header
3143 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
3144 * the IPv6 header is computed by the kernel.
3145 * **BPF_LWT_ENCAP_SEG6_INLINE**
3146 * Only works if *skb* contains an IPv6 packet. Insert a
3147 * Segment Routing Header (**struct ipv6_sr_hdr**) inside
3149 * **BPF_LWT_ENCAP_IP**
3150 * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
3151 * must be IPv4 or IPv6, followed by zero or more
3152 * additional headers, up to **LWT_BPF_MAX_HEADROOM**
3153 * total bytes in all prepended headers. Please note that
3154 * if **skb_is_gso**\ (*skb*) is true, no more than two
3155 * headers can be prepended, and the inner header, if
3156 * present, should be either GRE or UDP/GUE.
3158 * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
3159 * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
3160 * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
3161 * **BPF_PROG_TYPE_LWT_XMIT**.
3163 * A call to this helper is susceptible to change the underlying
3164 * packet buffer. Therefore, at load time, all checks on pointers
3165 * previously done by the verifier are invalidated and must be
3166 * performed again, if the helper is used in combination with
3167 * direct packet access.
3169 * 0 on success, or a negative error in case of failure.
3171 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
3173 * Store *len* bytes from address *from* into the packet
3174 * associated to *skb*, at *offset*. Only the flags, tag and TLVs
3175 * inside the outermost IPv6 Segment Routing Header can be
3176 * modified through this helper.
3178 * A call to this helper is susceptible to change the underlying
3179 * packet buffer. Therefore, at load time, all checks on pointers
3180 * previously done by the verifier are invalidated and must be
3181 * performed again, if the helper is used in combination with
3182 * direct packet access.
3184 * 0 on success, or a negative error in case of failure.
3186 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
3188 * Adjust the size allocated to TLVs in the outermost IPv6
3189 * Segment Routing Header contained in the packet associated to
3190 * *skb*, at position *offset* by *delta* bytes. Only offsets
3191 * after the segments are accepted. *delta* can be as well
3192 * positive (growing) as negative (shrinking).
3194 * A call to this helper is susceptible to change the underlying
3195 * packet buffer. Therefore, at load time, all checks on pointers
3196 * previously done by the verifier are invalidated and must be
3197 * performed again, if the helper is used in combination with
3198 * direct packet access.
3200 * 0 on success, or a negative error in case of failure.
3202 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
3204 * Apply an IPv6 Segment Routing action of type *action* to the
3205 * packet associated to *skb*. Each action takes a parameter
3206 * contained at address *param*, and of length *param_len* bytes.
3207 * *action* can be one of:
3209 * **SEG6_LOCAL_ACTION_END_X**
3210 * End.X action: Endpoint with Layer-3 cross-connect.
3211 * Type of *param*: **struct in6_addr**.
3212 * **SEG6_LOCAL_ACTION_END_T**
3213 * End.T action: Endpoint with specific IPv6 table lookup.
3214 * Type of *param*: **int**.
3215 * **SEG6_LOCAL_ACTION_END_B6**
3216 * End.B6 action: Endpoint bound to an SRv6 policy.
3217 * Type of *param*: **struct ipv6_sr_hdr**.
3218 * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
3219 * End.B6.Encap action: Endpoint bound to an SRv6
3220 * encapsulation policy.
3221 * Type of *param*: **struct ipv6_sr_hdr**.
3223 * A call to this helper is susceptible to change the underlying
3224 * packet buffer. Therefore, at load time, all checks on pointers
3225 * previously done by the verifier are invalidated and must be
3226 * performed again, if the helper is used in combination with
3227 * direct packet access.
3229 * 0 on success, or a negative error in case of failure.
3231 * long bpf_rc_repeat(void *ctx)
3233 * This helper is used in programs implementing IR decoding, to
3234 * report a successfully decoded repeat key message. This delays
3235 * the generation of a key up event for previously generated
3238 * Some IR protocols like NEC have a special IR message for
3239 * repeating last button, for when a button is held down.
3241 * The *ctx* should point to the lirc sample as passed into
3244 * This helper is only available is the kernel was compiled with
3245 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3250 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
3252 * This helper is used in programs implementing IR decoding, to
3253 * report a successfully decoded key press with *scancode*,
3254 * *toggle* value in the given *protocol*. The scancode will be
3255 * translated to a keycode using the rc keymap, and reported as
3256 * an input key down event. After a period a key up event is
3257 * generated. This period can be extended by calling either
3258 * **bpf_rc_keydown**\ () again with the same values, or calling
3259 * **bpf_rc_repeat**\ ().
3261 * Some protocols include a toggle bit, in case the button was
3262 * released and pressed again between consecutive scancodes.
3264 * The *ctx* should point to the lirc sample as passed into
3267 * The *protocol* is the decoded protocol number (see
3268 * **enum rc_proto** for some predefined values).
3270 * This helper is only available is the kernel was compiled with
3271 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3276 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
3278 * Return the cgroup v2 id of the socket associated with the *skb*.
3279 * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
3280 * helper for cgroup v1 by providing a tag resp. identifier that
3281 * can be matched on or used for map lookups e.g. to implement
3282 * policy. The cgroup v2 id of a given path in the hierarchy is
3283 * exposed in user space through the f_handle API in order to get
3284 * to the same 64-bit id.
3286 * This helper can be used on TC egress path, but not on ingress,
3287 * and is available only if the kernel was compiled with the
3288 * **CONFIG_SOCK_CGROUP_DATA** configuration option.
3290 * The id is returned or 0 in case the id could not be retrieved.
3292 * u64 bpf_get_current_cgroup_id(void)
3294 * Get the current cgroup id based on the cgroup within which
3295 * the current task is running.
3297 * A 64-bit integer containing the current cgroup id based
3298 * on the cgroup within which the current task is running.
3300 * void *bpf_get_local_storage(void *map, u64 flags)
3302 * Get the pointer to the local storage area.
3303 * The type and the size of the local storage is defined
3304 * by the *map* argument.
3305 * The *flags* meaning is specific for each map type,
3306 * and has to be 0 for cgroup local storage.
3308 * Depending on the BPF program type, a local storage area
3309 * can be shared between multiple instances of the BPF program,
3310 * running simultaneously.
3312 * A user should care about the synchronization by himself.
3313 * For example, by using the **BPF_ATOMIC** instructions to alter
3316 * A pointer to the local storage area.
3318 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
3320 * Select a **SO_REUSEPORT** socket from a
3321 * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
3322 * It checks the selected socket is matching the incoming
3323 * request in the socket buffer.
3325 * 0 on success, or a negative error in case of failure.
3327 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
3329 * Return id of cgroup v2 that is ancestor of cgroup associated
3330 * with the *skb* at the *ancestor_level*. The root cgroup is at
3331 * *ancestor_level* zero and each step down the hierarchy
3332 * increments the level. If *ancestor_level* == level of cgroup
3333 * associated with *skb*, then return value will be same as that
3334 * of **bpf_skb_cgroup_id**\ ().
3336 * The helper is useful to implement policies based on cgroups
3337 * that are upper in hierarchy than immediate cgroup associated
3340 * The format of returned id and helper limitations are same as in
3341 * **bpf_skb_cgroup_id**\ ().
3343 * The id is returned or 0 in case the id could not be retrieved.
3345 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3347 * Look for TCP socket matching *tuple*, optionally in a child
3348 * network namespace *netns*. The return value must be checked,
3349 * and if non-**NULL**, released via **bpf_sk_release**\ ().
3351 * The *ctx* should point to the context of the program, such as
3352 * the skb or socket (depending on the hook in use). This is used
3353 * to determine the base network namespace for the lookup.
3355 * *tuple_size* must be one of:
3357 * **sizeof**\ (*tuple*\ **->ipv4**)
3358 * Look for an IPv4 socket.
3359 * **sizeof**\ (*tuple*\ **->ipv6**)
3360 * Look for an IPv6 socket.
3362 * If the *netns* is a negative signed 32-bit integer, then the
3363 * socket lookup table in the netns associated with the *ctx*
3364 * will be used. For the TC hooks, this is the netns of the device
3365 * in the skb. For socket hooks, this is the netns of the socket.
3366 * If *netns* is any other signed 32-bit value greater than or
3367 * equal to zero then it specifies the ID of the netns relative to
3368 * the netns associated with the *ctx*. *netns* values beyond the
3369 * range of 32-bit integers are reserved for future use.
3371 * All values for *flags* are reserved for future usage, and must
3374 * This helper is available only if the kernel was compiled with
3375 * **CONFIG_NET** configuration option.
3377 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3378 * For sockets with reuseport option, the **struct bpf_sock**
3379 * result is from *reuse*\ **->socks**\ [] using the hash of the
3382 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3384 * Look for UDP socket matching *tuple*, optionally in a child
3385 * network namespace *netns*. The return value must be checked,
3386 * and if non-**NULL**, released via **bpf_sk_release**\ ().
3388 * The *ctx* should point to the context of the program, such as
3389 * the skb or socket (depending on the hook in use). This is used
3390 * to determine the base network namespace for the lookup.
3392 * *tuple_size* must be one of:
3394 * **sizeof**\ (*tuple*\ **->ipv4**)
3395 * Look for an IPv4 socket.
3396 * **sizeof**\ (*tuple*\ **->ipv6**)
3397 * Look for an IPv6 socket.
3399 * If the *netns* is a negative signed 32-bit integer, then the
3400 * socket lookup table in the netns associated with the *ctx*
3401 * will be used. For the TC hooks, this is the netns of the device
3402 * in the skb. For socket hooks, this is the netns of the socket.
3403 * If *netns* is any other signed 32-bit value greater than or
3404 * equal to zero then it specifies the ID of the netns relative to
3405 * the netns associated with the *ctx*. *netns* values beyond the
3406 * range of 32-bit integers are reserved for future use.
3408 * All values for *flags* are reserved for future usage, and must
3411 * This helper is available only if the kernel was compiled with
3412 * **CONFIG_NET** configuration option.
3414 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3415 * For sockets with reuseport option, the **struct bpf_sock**
3416 * result is from *reuse*\ **->socks**\ [] using the hash of the
3419 * long bpf_sk_release(void *sock)
3421 * Release the reference held by *sock*. *sock* must be a
3422 * non-**NULL** pointer that was returned from
3423 * **bpf_sk_lookup_xxx**\ ().
3425 * 0 on success, or a negative error in case of failure.
3427 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
3429 * Push an element *value* in *map*. *flags* is one of:
3432 * If the queue/stack is full, the oldest element is
3433 * removed to make room for this.
3435 * 0 on success, or a negative error in case of failure.
3437 * long bpf_map_pop_elem(struct bpf_map *map, void *value)
3439 * Pop an element from *map*.
3441 * 0 on success, or a negative error in case of failure.
3443 * long bpf_map_peek_elem(struct bpf_map *map, void *value)
3445 * Get an element from *map* without removing it.
3447 * 0 on success, or a negative error in case of failure.
3449 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3451 * For socket policies, insert *len* bytes into *msg* at offset
3454 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
3455 * *msg* it may want to insert metadata or options into the *msg*.
3456 * This can later be read and used by any of the lower layer BPF
3459 * This helper may fail if under memory pressure (a malloc
3460 * fails) in these cases BPF programs will get an appropriate
3461 * error and BPF programs will need to handle them.
3463 * 0 on success, or a negative error in case of failure.
3465 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3467 * Will remove *len* bytes from a *msg* starting at byte *start*.
3468 * This may result in **ENOMEM** errors under certain situations if
3469 * an allocation and copy are required due to a full ring buffer.
3470 * However, the helper will try to avoid doing the allocation
3471 * if possible. Other errors can occur if input parameters are
3472 * invalid either due to *start* byte not being valid part of *msg*
3473 * payload and/or *pop* value being to large.
3475 * 0 on success, or a negative error in case of failure.
3477 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
3479 * This helper is used in programs implementing IR decoding, to
3480 * report a successfully decoded pointer movement.
3482 * The *ctx* should point to the lirc sample as passed into
3485 * This helper is only available is the kernel was compiled with
3486 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3491 * long bpf_spin_lock(struct bpf_spin_lock *lock)
3493 * Acquire a spinlock represented by the pointer *lock*, which is
3494 * stored as part of a value of a map. Taking the lock allows to
3495 * safely update the rest of the fields in that value. The
3496 * spinlock can (and must) later be released with a call to
3497 * **bpf_spin_unlock**\ (\ *lock*\ ).
3499 * Spinlocks in BPF programs come with a number of restrictions
3502 * * **bpf_spin_lock** objects are only allowed inside maps of
3503 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
3504 * list could be extended in the future).
3505 * * BTF description of the map is mandatory.
3506 * * The BPF program can take ONE lock at a time, since taking two
3507 * or more could cause dead locks.
3508 * * Only one **struct bpf_spin_lock** is allowed per map element.
3509 * * When the lock is taken, calls (either BPF to BPF or helpers)
3511 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
3512 * allowed inside a spinlock-ed region.
3513 * * The BPF program MUST call **bpf_spin_unlock**\ () to release
3514 * the lock, on all execution paths, before it returns.
3515 * * The BPF program can access **struct bpf_spin_lock** only via
3516 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
3517 * helpers. Loading or storing data into the **struct
3518 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
3519 * * To use the **bpf_spin_lock**\ () helper, the BTF description
3520 * of the map value must be a struct and have **struct
3521 * bpf_spin_lock** *anyname*\ **;** field at the top level.
3522 * Nested lock inside another struct is not allowed.
3523 * * The **struct bpf_spin_lock** *lock* field in a map value must
3524 * be aligned on a multiple of 4 bytes in that value.
3525 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
3526 * the **bpf_spin_lock** field to user space.
3527 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
3528 * a BPF program, do not update the **bpf_spin_lock** field.
3529 * * **bpf_spin_lock** cannot be on the stack or inside a
3530 * networking packet (it can only be inside of a map values).
3531 * * **bpf_spin_lock** is available to root only.
3532 * * Tracing programs and socket filter programs cannot use
3533 * **bpf_spin_lock**\ () due to insufficient preemption checks
3534 * (but this may change in the future).
3535 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
3539 * long bpf_spin_unlock(struct bpf_spin_lock *lock)
3541 * Release the *lock* previously locked by a call to
3542 * **bpf_spin_lock**\ (\ *lock*\ ).
3546 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
3548 * This helper gets a **struct bpf_sock** pointer such
3549 * that all the fields in this **bpf_sock** can be accessed.
3551 * A **struct bpf_sock** pointer on success, or **NULL** in
3554 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
3556 * This helper gets a **struct bpf_tcp_sock** pointer from a
3557 * **struct bpf_sock** pointer.
3559 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
3562 * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
3564 * Set ECN (Explicit Congestion Notification) field of IP header
3565 * to **CE** (Congestion Encountered) if current value is **ECT**
3566 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
3569 * 1 if the **CE** flag is set (either by the current helper call
3570 * or because it was already present), 0 if it is not set.
3572 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
3574 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
3575 * **bpf_sk_release**\ () is unnecessary and not allowed.
3577 * A **struct bpf_sock** pointer on success, or **NULL** in
3580 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3582 * Look for TCP socket matching *tuple*, optionally in a child
3583 * network namespace *netns*. The return value must be checked,
3584 * and if non-**NULL**, released via **bpf_sk_release**\ ().
3586 * This function is identical to **bpf_sk_lookup_tcp**\ (), except
3587 * that it also returns timewait or request sockets. Use
3588 * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
3591 * This helper is available only if the kernel was compiled with
3592 * **CONFIG_NET** configuration option.
3594 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3595 * For sockets with reuseport option, the **struct bpf_sock**
3596 * result is from *reuse*\ **->socks**\ [] using the hash of the
3599 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
3601 * Check whether *iph* and *th* contain a valid SYN cookie ACK for
3602 * the listening socket in *sk*.
3604 * *iph* points to the start of the IPv4 or IPv6 header, while
3605 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
3606 * **sizeof**\ (**struct ipv6hdr**).
3608 * *th* points to the start of the TCP header, while *th_len*
3609 * contains the length of the TCP header (at least
3610 * **sizeof**\ (**struct tcphdr**)).
3612 * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
3615 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
3617 * Get name of sysctl in /proc/sys/ and copy it into provided by
3618 * program buffer *buf* of size *buf_len*.
3620 * The buffer is always NUL terminated, unless it's zero-sized.
3622 * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
3623 * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
3624 * only (e.g. "tcp_mem").
3626 * Number of character copied (not including the trailing NUL).
3628 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
3629 * truncated name in this case).
3631 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3633 * Get current value of sysctl as it is presented in /proc/sys
3634 * (incl. newline, etc), and copy it as a string into provided
3635 * by program buffer *buf* of size *buf_len*.
3637 * The whole value is copied, no matter what file position user
3638 * space issued e.g. sys_read at.
3640 * The buffer is always NUL terminated, unless it's zero-sized.
3642 * Number of character copied (not including the trailing NUL).
3644 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
3645 * truncated name in this case).
3647 * **-EINVAL** if current value was unavailable, e.g. because
3648 * sysctl is uninitialized and read returns -EIO for it.
3650 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3652 * Get new value being written by user space to sysctl (before
3653 * the actual write happens) and copy it as a string into
3654 * provided by program buffer *buf* of size *buf_len*.
3656 * User space may write new value at file position > 0.
3658 * The buffer is always NUL terminated, unless it's zero-sized.
3660 * Number of character copied (not including the trailing NUL).
3662 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
3663 * truncated name in this case).
3665 * **-EINVAL** if sysctl is being read.
3667 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
3669 * Override new value being written by user space to sysctl with
3670 * value provided by program in buffer *buf* of size *buf_len*.
3672 * *buf* should contain a string in same form as provided by user
3673 * space on sysctl write.
3675 * User space may write new value at file position > 0. To override
3676 * the whole sysctl value file position should be set to zero.
3680 * **-E2BIG** if the *buf_len* is too big.
3682 * **-EINVAL** if sysctl is being read.
3684 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
3686 * Convert the initial part of the string from buffer *buf* of
3687 * size *buf_len* to a long integer according to the given base
3688 * and save the result in *res*.
3690 * The string may begin with an arbitrary amount of white space
3691 * (as determined by **isspace**\ (3)) followed by a single
3692 * optional '**-**' sign.
3694 * Five least significant bits of *flags* encode base, other bits
3695 * are currently unused.
3697 * Base must be either 8, 10, 16 or 0 to detect it automatically
3698 * similar to user space **strtol**\ (3).
3700 * Number of characters consumed on success. Must be positive but
3701 * no more than *buf_len*.
3703 * **-EINVAL** if no valid digits were found or unsupported base
3706 * **-ERANGE** if resulting value was out of range.
3708 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
3710 * Convert the initial part of the string from buffer *buf* of
3711 * size *buf_len* to an unsigned long integer according to the
3712 * given base and save the result in *res*.
3714 * The string may begin with an arbitrary amount of white space
3715 * (as determined by **isspace**\ (3)).
3717 * Five least significant bits of *flags* encode base, other bits
3718 * are currently unused.
3720 * Base must be either 8, 10, 16 or 0 to detect it automatically
3721 * similar to user space **strtoul**\ (3).
3723 * Number of characters consumed on success. Must be positive but
3724 * no more than *buf_len*.
3726 * **-EINVAL** if no valid digits were found or unsupported base
3729 * **-ERANGE** if resulting value was out of range.
3731 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
3733 * Get a bpf-local-storage from a *sk*.
3735 * Logically, it could be thought of getting the value from
3736 * a *map* with *sk* as the **key**. From this
3737 * perspective, the usage is not much different from
3738 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
3739 * helper enforces the key must be a full socket and the map must
3740 * be a **BPF_MAP_TYPE_SK_STORAGE** also.
3742 * Underneath, the value is stored locally at *sk* instead of
3743 * the *map*. The *map* is used as the bpf-local-storage
3744 * "type". The bpf-local-storage "type" (i.e. the *map*) is
3745 * searched against all bpf-local-storages residing at *sk*.
3747 * *sk* is a kernel **struct sock** pointer for LSM program.
3748 * *sk* is a **struct bpf_sock** pointer for other program types.
3750 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
3751 * used such that a new bpf-local-storage will be
3752 * created if one does not exist. *value* can be used
3753 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
3754 * the initial value of a bpf-local-storage. If *value* is
3755 * **NULL**, the new bpf-local-storage will be zero initialized.
3757 * A bpf-local-storage pointer is returned on success.
3759 * **NULL** if not found or there was an error in adding
3760 * a new bpf-local-storage.
3762 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
3764 * Delete a bpf-local-storage from a *sk*.
3768 * **-ENOENT** if the bpf-local-storage cannot be found.
3769 * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
3771 * long bpf_send_signal(u32 sig)
3773 * Send signal *sig* to the process of the current task.
3774 * The signal may be delivered to any of this process's threads.
3776 * 0 on success or successfully queued.
3778 * **-EBUSY** if work queue under nmi is full.
3780 * **-EINVAL** if *sig* is invalid.
3782 * **-EPERM** if no permission to send the *sig*.
3784 * **-EAGAIN** if bpf program can try again.
3786 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
3788 * Try to issue a SYN cookie for the packet with corresponding
3789 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
3791 * *iph* points to the start of the IPv4 or IPv6 header, while
3792 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
3793 * **sizeof**\ (**struct ipv6hdr**).
3795 * *th* points to the start of the TCP header, while *th_len*
3796 * contains the length of the TCP header with options (at least
3797 * **sizeof**\ (**struct tcphdr**)).
3799 * On success, lower 32 bits hold the generated SYN cookie in
3800 * followed by 16 bits which hold the MSS value for that cookie,
3801 * and the top 16 bits are unused.
3803 * On failure, the returned value is one of the following:
3805 * **-EINVAL** SYN cookie cannot be issued due to error
3807 * **-ENOENT** SYN cookie should not be issued (no SYN flood)
3809 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
3811 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6
3813 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
3815 * Write raw *data* blob into a special BPF perf event held by
3816 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
3817 * event must have the following attributes: **PERF_SAMPLE_RAW**
3818 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
3819 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
3821 * The *flags* are used to indicate the index in *map* for which
3822 * the value must be put, masked with **BPF_F_INDEX_MASK**.
3823 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
3824 * to indicate that the index of the current CPU core should be
3827 * The value to write, of *size*, is passed through eBPF stack and
3828 * pointed by *data*.
3830 * *ctx* is a pointer to in-kernel struct sk_buff.
3832 * This helper is similar to **bpf_perf_event_output**\ () but
3833 * restricted to raw_tracepoint bpf programs.
3835 * 0 on success, or a negative error in case of failure.
3837 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
3839 * Safely attempt to read *size* bytes from user space address
3840 * *unsafe_ptr* and store the data in *dst*.
3842 * 0 on success, or a negative error in case of failure.
3844 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
3846 * Safely attempt to read *size* bytes from kernel space address
3847 * *unsafe_ptr* and store the data in *dst*.
3849 * 0 on success, or a negative error in case of failure.
3851 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
3853 * Copy a NUL terminated string from an unsafe user address
3854 * *unsafe_ptr* to *dst*. The *size* should include the
3855 * terminating NUL byte. In case the string length is smaller than
3856 * *size*, the target is not padded with further NUL bytes. If the
3857 * string length is larger than *size*, just *size*-1 bytes are
3858 * copied and the last byte is set to NUL.
3860 * On success, returns the number of bytes that were written,
3861 * including the terminal NUL. This makes this helper useful in
3862 * tracing programs for reading strings, and more importantly to
3863 * get its length at runtime. See the following snippet:
3867 * SEC("kprobe/sys_open")
3868 * void bpf_sys_open(struct pt_regs *ctx)
3870 * char buf[PATHLEN]; // PATHLEN is defined to 256
3871 * int res = bpf_probe_read_user_str(buf, sizeof(buf),
3874 * // Consume buf, for example push it to
3875 * // userspace via bpf_perf_event_output(); we
3876 * // can use res (the string length) as event
3877 * // size, after checking its boundaries.
3880 * In comparison, using **bpf_probe_read_user**\ () helper here
3881 * instead to read the string would require to estimate the length
3882 * at compile time, and would often result in copying more memory
3885 * Another useful use case is when parsing individual process
3886 * arguments or individual environment variables navigating
3887 * *current*\ **->mm->arg_start** and *current*\
3888 * **->mm->env_start**: using this helper and the return value,
3889 * one can quickly iterate at the right offset of the memory area.
3891 * On success, the strictly positive length of the output string,
3892 * including the trailing NUL character. On error, a negative
3895 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
3897 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
3898 * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
3900 * On success, the strictly positive length of the string, including
3901 * the trailing NUL character. On error, a negative value.
3903 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
3905 * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
3906 * *rcv_nxt* is the ack_seq to be sent out.
3908 * 0 on success, or a negative error in case of failure.
3910 * long bpf_send_signal_thread(u32 sig)
3912 * Send signal *sig* to the thread corresponding to the current task.
3914 * 0 on success or successfully queued.
3916 * **-EBUSY** if work queue under nmi is full.
3918 * **-EINVAL** if *sig* is invalid.
3920 * **-EPERM** if no permission to send the *sig*.
3922 * **-EAGAIN** if bpf program can try again.
3924 * u64 bpf_jiffies64(void)
3926 * Obtain the 64bit jiffies
3928 * The 64 bit jiffies
3930 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
3932 * For an eBPF program attached to a perf event, retrieve the
3933 * branch records (**struct perf_branch_entry**) associated to *ctx*
3934 * and store it in the buffer pointed by *buf* up to size
3937 * On success, number of bytes written to *buf*. On error, a
3940 * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
3941 * instead return the number of bytes required to store all the
3942 * branch entries. If this flag is set, *buf* may be NULL.
3944 * **-EINVAL** if arguments invalid or **size** not a multiple
3945 * of **sizeof**\ (**struct perf_branch_entry**\ ).
3947 * **-ENOENT** if architecture does not support branch records.
3949 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
3951 * Returns 0 on success, values for *pid* and *tgid* as seen from the current
3952 * *namespace* will be returned in *nsdata*.
3954 * 0 on success, or one of the following in case of failure:
3956 * **-EINVAL** if dev and inum supplied don't match dev_t and inode number
3957 * with nsfs of current task, or if dev conversion to dev_t lost high bits.
3959 * **-ENOENT** if pidns does not exists for the current task.
3961 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
3963 * Write raw *data* blob into a special BPF perf event held by
3964 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
3965 * event must have the following attributes: **PERF_SAMPLE_RAW**
3966 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
3967 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
3969 * The *flags* are used to indicate the index in *map* for which
3970 * the value must be put, masked with **BPF_F_INDEX_MASK**.
3971 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
3972 * to indicate that the index of the current CPU core should be
3975 * The value to write, of *size*, is passed through eBPF stack and
3976 * pointed by *data*.
3978 * *ctx* is a pointer to in-kernel struct xdp_buff.
3980 * This helper is similar to **bpf_perf_eventoutput**\ () but
3981 * restricted to raw_tracepoint bpf programs.
3983 * 0 on success, or a negative error in case of failure.
3985 * u64 bpf_get_netns_cookie(void *ctx)
3987 * Retrieve the cookie (generated by the kernel) of the network
3988 * namespace the input *ctx* is associated with. The network
3989 * namespace cookie remains stable for its lifetime and provides
3990 * a global identifier that can be assumed unique. If *ctx* is
3991 * NULL, then the helper returns the cookie for the initial
3992 * network namespace. The cookie itself is very similar to that
3993 * of **bpf_get_socket_cookie**\ () helper, but for network
3994 * namespaces instead of sockets.
3996 * A 8-byte long opaque number.
3998 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
4000 * Return id of cgroup v2 that is ancestor of the cgroup associated
4001 * with the current task at the *ancestor_level*. The root cgroup
4002 * is at *ancestor_level* zero and each step down the hierarchy
4003 * increments the level. If *ancestor_level* == level of cgroup
4004 * associated with the current task, then return value will be the
4005 * same as that of **bpf_get_current_cgroup_id**\ ().
4007 * The helper is useful to implement policies based on cgroups
4008 * that are upper in hierarchy than immediate cgroup associated
4009 * with the current task.
4011 * The format of returned id and helper limitations are same as in
4012 * **bpf_get_current_cgroup_id**\ ().
4014 * The id is returned or 0 in case the id could not be retrieved.
4016 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
4018 * Helper is overloaded depending on BPF program type. This
4019 * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
4020 * **BPF_PROG_TYPE_SCHED_ACT** programs.
4022 * Assign the *sk* to the *skb*. When combined with appropriate
4023 * routing configuration to receive the packet towards the socket,
4024 * will cause *skb* to be delivered to the specified socket.
4025 * Subsequent redirection of *skb* via **bpf_redirect**\ (),
4026 * **bpf_clone_redirect**\ () or other methods outside of BPF may
4027 * interfere with successful delivery to the socket.
4029 * This operation is only valid from TC ingress path.
4031 * The *flags* argument must be zero.
4033 * 0 on success, or a negative error in case of failure:
4035 * **-EINVAL** if specified *flags* are not supported.
4037 * **-ENOENT** if the socket is unavailable for assignment.
4039 * **-ENETUNREACH** if the socket is unreachable (wrong netns).
4041 * **-EOPNOTSUPP** if the operation is not supported, for example
4042 * a call from outside of TC ingress.
4044 * **-ESOCKTNOSUPPORT** if the socket type is not supported
4047 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
4049 * Helper is overloaded depending on BPF program type. This
4050 * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
4052 * Select the *sk* as a result of a socket lookup.
4054 * For the operation to succeed passed socket must be compatible
4055 * with the packet description provided by the *ctx* object.
4057 * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
4058 * be an exact match. While IP family (**AF_INET** or
4059 * **AF_INET6**) must be compatible, that is IPv6 sockets
4060 * that are not v6-only can be selected for IPv4 packets.
4062 * Only TCP listeners and UDP unconnected sockets can be
4063 * selected. *sk* can also be NULL to reset any previous
4066 * *flags* argument can combination of following values:
4068 * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
4069 * socket selection, potentially done by a BPF program
4070 * that ran before us.
4072 * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
4073 * load-balancing within reuseport group for the socket
4076 * On success *ctx->sk* will point to the selected socket.
4079 * 0 on success, or a negative errno in case of failure.
4081 * * **-EAFNOSUPPORT** if socket family (*sk->family*) is
4082 * not compatible with packet family (*ctx->family*).
4084 * * **-EEXIST** if socket has been already selected,
4085 * potentially by another program, and
4086 * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
4088 * * **-EINVAL** if unsupported flags were specified.
4090 * * **-EPROTOTYPE** if socket L4 protocol
4091 * (*sk->protocol*) doesn't match packet protocol
4092 * (*ctx->protocol*).
4094 * * **-ESOCKTNOSUPPORT** if socket is not in allowed
4095 * state (TCP listening or UDP unconnected).
4097 * u64 bpf_ktime_get_boot_ns(void)
4099 * Return the time elapsed since system boot, in nanoseconds.
4100 * Does include the time the system was suspended.
4101 * See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
4105 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
4107 * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
4108 * out the format string.
4109 * The *m* represents the seq_file. The *fmt* and *fmt_size* are for
4110 * the format string itself. The *data* and *data_len* are format string
4111 * arguments. The *data* are a **u64** array and corresponding format string
4112 * values are stored in the array. For strings and pointers where pointees
4113 * are accessed, only the pointer values are stored in the *data* array.
4114 * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
4116 * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
4117 * Reading kernel memory may fail due to either invalid address or
4118 * valid address but requiring a major memory fault. If reading kernel memory
4119 * fails, the string for **%s** will be an empty string, and the ip
4120 * address for **%p{i,I}{4,6}** will be 0. Not returning error to
4121 * bpf program is consistent with what **bpf_trace_printk**\ () does for now.
4123 * 0 on success, or a negative error in case of failure:
4125 * **-EBUSY** if per-CPU memory copy buffer is busy, can try again
4126 * by returning 1 from bpf program.
4128 * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
4130 * **-E2BIG** if *fmt* contains too many format specifiers.
4132 * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
4134 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
4136 * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
4137 * The *m* represents the seq_file. The *data* and *len* represent the
4138 * data to write in bytes.
4140 * 0 on success, or a negative error in case of failure:
4142 * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
4144 * u64 bpf_sk_cgroup_id(void *sk)
4146 * Return the cgroup v2 id of the socket *sk*.
4148 * *sk* must be a non-**NULL** pointer to a socket, e.g. one
4149 * returned from **bpf_sk_lookup_xxx**\ (),
4150 * **bpf_sk_fullsock**\ (), etc. The format of returned id is
4151 * same as in **bpf_skb_cgroup_id**\ ().
4153 * This helper is available only if the kernel was compiled with
4154 * the **CONFIG_SOCK_CGROUP_DATA** configuration option.
4156 * The id is returned or 0 in case the id could not be retrieved.
4158 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
4160 * Return id of cgroup v2 that is ancestor of cgroup associated
4161 * with the *sk* at the *ancestor_level*. The root cgroup is at
4162 * *ancestor_level* zero and each step down the hierarchy
4163 * increments the level. If *ancestor_level* == level of cgroup
4164 * associated with *sk*, then return value will be same as that
4165 * of **bpf_sk_cgroup_id**\ ().
4167 * The helper is useful to implement policies based on cgroups
4168 * that are upper in hierarchy than immediate cgroup associated
4171 * The format of returned id and helper limitations are same as in
4172 * **bpf_sk_cgroup_id**\ ().
4174 * The id is returned or 0 in case the id could not be retrieved.
4176 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
4178 * Copy *size* bytes from *data* into a ring buffer *ringbuf*.
4179 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4180 * of new data availability is sent.
4181 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4182 * of new data availability is sent unconditionally.
4183 * If **0** is specified in *flags*, an adaptive notification
4184 * of new data availability is sent.
4186 * An adaptive notification is a notification sent whenever the user-space
4187 * process has caught up and consumed all available payloads. In case the user-space
4188 * process is still processing a previous payload, then no notification is needed
4189 * as it will process the newly added payload automatically.
4191 * 0 on success, or a negative error in case of failure.
4193 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
4195 * Reserve *size* bytes of payload in a ring buffer *ringbuf*.
4196 * *flags* must be 0.
4198 * Valid pointer with *size* bytes of memory available; NULL,
4201 * void bpf_ringbuf_submit(void *data, u64 flags)
4203 * Submit reserved ring buffer sample, pointed to by *data*.
4204 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4205 * of new data availability is sent.
4206 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4207 * of new data availability is sent unconditionally.
4208 * If **0** is specified in *flags*, an adaptive notification
4209 * of new data availability is sent.
4211 * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4213 * Nothing. Always succeeds.
4215 * void bpf_ringbuf_discard(void *data, u64 flags)
4217 * Discard reserved ring buffer sample, pointed to by *data*.
4218 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4219 * of new data availability is sent.
4220 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4221 * of new data availability is sent unconditionally.
4222 * If **0** is specified in *flags*, an adaptive notification
4223 * of new data availability is sent.
4225 * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4227 * Nothing. Always succeeds.
4229 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
4231 * Query various characteristics of provided ring buffer. What
4232 * exactly is queries is determined by *flags*:
4234 * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
4235 * * **BPF_RB_RING_SIZE**: The size of ring buffer.
4236 * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
4237 * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
4239 * Data returned is just a momentary snapshot of actual values
4240 * and could be inaccurate, so this facility should be used to
4241 * power heuristics and for reporting, not to make 100% correct
4244 * Requested value, or 0, if *flags* are not recognized.
4246 * long bpf_csum_level(struct sk_buff *skb, u64 level)
4248 * Change the skbs checksum level by one layer up or down, or
4249 * reset it entirely to none in order to have the stack perform
4250 * checksum validation. The level is applicable to the following
4251 * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
4252 * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
4253 * through **bpf_skb_adjust_room**\ () helper with passing in
4254 * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
4255 * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
4256 * the UDP header is removed. Similarly, an encap of the latter
4257 * into the former could be accompanied by a helper call to
4258 * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
4259 * skb is still intended to be processed in higher layers of the
4260 * stack instead of just egressing at tc.
4262 * There are three supported level settings at this time:
4264 * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
4265 * with CHECKSUM_UNNECESSARY.
4266 * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
4267 * with CHECKSUM_UNNECESSARY.
4268 * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
4269 * sets CHECKSUM_NONE to force checksum validation by the stack.
4270 * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
4273 * 0 on success, or a negative error in case of failure. In the
4274 * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
4275 * is returned or the error code -EACCES in case the skb is not
4276 * subject to CHECKSUM_UNNECESSARY.
4278 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
4280 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
4282 * *sk* if casting is valid, or **NULL** otherwise.
4284 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
4286 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
4288 * *sk* if casting is valid, or **NULL** otherwise.
4290 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
4292 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
4294 * *sk* if casting is valid, or **NULL** otherwise.
4296 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
4298 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
4300 * *sk* if casting is valid, or **NULL** otherwise.
4302 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
4304 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
4306 * *sk* if casting is valid, or **NULL** otherwise.
4308 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
4310 * Return a user or a kernel stack in bpf program provided buffer.
4311 * To achieve this, the helper needs *task*, which is a valid
4312 * pointer to **struct task_struct**. To store the stacktrace, the
4313 * bpf program provides *buf* with a nonnegative *size*.
4315 * The last argument, *flags*, holds the number of stack frames to
4316 * skip (from 0 to 255), masked with
4317 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
4318 * the following flags:
4320 * **BPF_F_USER_STACK**
4321 * Collect a user space stack instead of a kernel stack.
4322 * **BPF_F_USER_BUILD_ID**
4323 * Collect buildid+offset instead of ips for user stack,
4324 * only valid if **BPF_F_USER_STACK** is also specified.
4326 * **bpf_get_task_stack**\ () can collect up to
4327 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
4328 * to sufficient large buffer size. Note that
4329 * this limit can be controlled with the **sysctl** program, and
4330 * that it should be manually increased in order to profile long
4331 * user stacks (such as stacks for Java programs). To do so, use:
4335 * # sysctl kernel.perf_event_max_stack=<new value>
4337 * The non-negative copied *buf* length equal to or less than
4338 * *size* on success, or a negative error in case of failure.
4340 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
4342 * Load header option. Support reading a particular TCP header
4343 * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
4345 * If *flags* is 0, it will search the option from the
4346 * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
4347 * has details on what skb_data contains under different
4348 * *skops*\ **->op**.
4350 * The first byte of the *searchby_res* specifies the
4351 * kind that it wants to search.
4353 * If the searching kind is an experimental kind
4354 * (i.e. 253 or 254 according to RFC6994). It also
4355 * needs to specify the "magic" which is either
4356 * 2 bytes or 4 bytes. It then also needs to
4357 * specify the size of the magic by using
4358 * the 2nd byte which is "kind-length" of a TCP
4359 * header option and the "kind-length" also
4360 * includes the first 2 bytes "kind" and "kind-length"
4361 * itself as a normal TCP header option also does.
4363 * For example, to search experimental kind 254 with
4364 * 2 byte magic 0xeB9F, the searchby_res should be
4365 * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
4367 * To search for the standard window scale option (3),
4368 * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
4369 * Note, kind-length must be 0 for regular option.
4371 * Searching for No-Op (0) and End-of-Option-List (1) are
4374 * *len* must be at least 2 bytes which is the minimal size
4375 * of a header option.
4379 * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
4380 * saved_syn packet or the just-received syn packet.
4383 * > 0 when found, the header option is copied to *searchby_res*.
4384 * The return value is the total length copied. On failure, a
4385 * negative error code is returned:
4387 * **-EINVAL** if a parameter is invalid.
4389 * **-ENOMSG** if the option is not found.
4391 * **-ENOENT** if no syn packet is available when
4392 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
4394 * **-ENOSPC** if there is not enough space. Only *len* number of
4397 * **-EFAULT** on failure to parse the header options in the
4400 * **-EPERM** if the helper cannot be used under the current
4401 * *skops*\ **->op**.
4403 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
4405 * Store header option. The data will be copied
4406 * from buffer *from* with length *len* to the TCP header.
4408 * The buffer *from* should have the whole option that
4409 * includes the kind, kind-length, and the actual
4410 * option data. The *len* must be at least kind-length
4411 * long. The kind-length does not have to be 4 byte
4412 * aligned. The kernel will take care of the padding
4413 * and setting the 4 bytes aligned value to th->doff.
4415 * This helper will check for duplicated option
4416 * by searching the same option in the outgoing skb.
4418 * This helper can only be called during
4419 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4422 * 0 on success, or negative error in case of failure:
4424 * **-EINVAL** If param is invalid.
4426 * **-ENOSPC** if there is not enough space in the header.
4427 * Nothing has been written
4429 * **-EEXIST** if the option already exists.
4431 * **-EFAULT** on failrue to parse the existing header options.
4433 * **-EPERM** if the helper cannot be used under the current
4434 * *skops*\ **->op**.
4436 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
4438 * Reserve *len* bytes for the bpf header option. The
4439 * space will be used by **bpf_store_hdr_opt**\ () later in
4440 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4442 * If **bpf_reserve_hdr_opt**\ () is called multiple times,
4443 * the total number of bytes will be reserved.
4445 * This helper can only be called during
4446 * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
4449 * 0 on success, or negative error in case of failure:
4451 * **-EINVAL** if a parameter is invalid.
4453 * **-ENOSPC** if there is not enough space in the header.
4455 * **-EPERM** if the helper cannot be used under the current
4456 * *skops*\ **->op**.
4458 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
4460 * Get a bpf_local_storage from an *inode*.
4462 * Logically, it could be thought of as getting the value from
4463 * a *map* with *inode* as the **key**. From this
4464 * perspective, the usage is not much different from
4465 * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
4466 * helper enforces the key must be an inode and the map must also
4467 * be a **BPF_MAP_TYPE_INODE_STORAGE**.
4469 * Underneath, the value is stored locally at *inode* instead of
4470 * the *map*. The *map* is used as the bpf-local-storage
4471 * "type". The bpf-local-storage "type" (i.e. the *map*) is
4472 * searched against all bpf_local_storage residing at *inode*.
4474 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4475 * used such that a new bpf_local_storage will be
4476 * created if one does not exist. *value* can be used
4477 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4478 * the initial value of a bpf_local_storage. If *value* is
4479 * **NULL**, the new bpf_local_storage will be zero initialized.
4481 * A bpf_local_storage pointer is returned on success.
4483 * **NULL** if not found or there was an error in adding
4484 * a new bpf_local_storage.
4486 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
4488 * Delete a bpf_local_storage from an *inode*.
4492 * **-ENOENT** if the bpf_local_storage cannot be found.
4494 * long bpf_d_path(struct path *path, char *buf, u32 sz)
4496 * Return full path for given **struct path** object, which
4497 * needs to be the kernel BTF *path* object. The path is
4498 * returned in the provided buffer *buf* of size *sz* and
4499 * is zero terminated.
4502 * On success, the strictly positive length of the string,
4503 * including the trailing NUL character. On error, a negative
4506 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
4508 * Read *size* bytes from user space address *user_ptr* and store
4509 * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
4511 * 0 on success, or a negative error in case of failure.
4513 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
4515 * Use BTF to store a string representation of *ptr*->ptr in *str*,
4516 * using *ptr*->type_id. This value should specify the type
4517 * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
4518 * can be used to look up vmlinux BTF type ids. Traversing the
4519 * data structure using BTF, the type information and values are
4520 * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
4521 * the pointer data is carried out to avoid kernel crashes during
4522 * operation. Smaller types can use string space on the stack;
4523 * larger programs can use map data to store the string
4526 * The string can be subsequently shared with userspace via
4527 * bpf_perf_event_output() or ring buffer interfaces.
4528 * bpf_trace_printk() is to be avoided as it places too small
4529 * a limit on string size to be useful.
4531 * *flags* is a combination of
4534 * no formatting around type information
4536 * no struct/union member names/types
4538 * show raw (unobfuscated) pointer values;
4539 * equivalent to printk specifier %px.
4541 * show zero-valued struct/union members; they
4542 * are not displayed by default
4545 * The number of bytes that were written (or would have been
4546 * written if output had to be truncated due to string size),
4547 * or a negative error in cases of failure.
4549 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
4551 * Use BTF to write to seq_write a string representation of
4552 * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
4553 * *flags* are identical to those used for bpf_snprintf_btf.
4555 * 0 on success or a negative error in case of failure.
4557 * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
4559 * See **bpf_get_cgroup_classid**\ () for the main description.
4560 * This helper differs from **bpf_get_cgroup_classid**\ () in that
4561 * the cgroup v1 net_cls class is retrieved only from the *skb*'s
4562 * associated socket instead of the current process.
4564 * The id is returned or 0 in case the id could not be retrieved.
4566 * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
4568 * Redirect the packet to another net device of index *ifindex*
4569 * and fill in L2 addresses from neighboring subsystem. This helper
4570 * is somewhat similar to **bpf_redirect**\ (), except that it
4571 * populates L2 addresses as well, meaning, internally, the helper
4572 * relies on the neighbor lookup for the L2 address of the nexthop.
4574 * The helper will perform a FIB lookup based on the skb's
4575 * networking header to get the address of the next hop, unless
4576 * this is supplied by the caller in the *params* argument. The
4577 * *plen* argument indicates the len of *params* and should be set
4578 * to 0 if *params* is NULL.
4580 * The *flags* argument is reserved and must be 0. The helper is
4581 * currently only supported for tc BPF program types, and enabled
4582 * for IPv4 and IPv6 protocols.
4584 * The helper returns **TC_ACT_REDIRECT** on success or
4585 * **TC_ACT_SHOT** on error.
4587 * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
4589 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4590 * pointer to the percpu kernel variable on *cpu*. A ksym is an
4591 * extern variable decorated with '__ksym'. For ksym, there is a
4592 * global var (either static or global) defined of the same name
4593 * in the kernel. The ksym is percpu if the global var is percpu.
4594 * The returned pointer points to the global percpu var on *cpu*.
4596 * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
4597 * kernel, except that bpf_per_cpu_ptr() may return NULL. This
4598 * happens if *cpu* is larger than nr_cpu_ids. The caller of
4599 * bpf_per_cpu_ptr() must check the returned value.
4601 * A pointer pointing to the kernel percpu variable on *cpu*, or
4602 * NULL, if *cpu* is invalid.
4604 * void *bpf_this_cpu_ptr(const void *percpu_ptr)
4606 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4607 * pointer to the percpu kernel variable on this cpu. See the
4608 * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
4610 * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
4611 * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
4612 * never return NULL.
4614 * A pointer pointing to the kernel percpu variable on this cpu.
4616 * long bpf_redirect_peer(u32 ifindex, u64 flags)
4618 * Redirect the packet to another net device of index *ifindex*.
4619 * This helper is somewhat similar to **bpf_redirect**\ (), except
4620 * that the redirection happens to the *ifindex*' peer device and
4621 * the netns switch takes place from ingress to ingress without
4622 * going through the CPU's backlog queue.
4624 * The *flags* argument is reserved and must be 0. The helper is
4625 * currently only supported for tc BPF program types at the ingress
4626 * hook and for veth device types. The peer device must reside in a
4627 * different network namespace.
4629 * The helper returns **TC_ACT_REDIRECT** on success or
4630 * **TC_ACT_SHOT** on error.
4632 * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
4634 * Get a bpf_local_storage from the *task*.
4636 * Logically, it could be thought of as getting the value from
4637 * a *map* with *task* as the **key**. From this
4638 * perspective, the usage is not much different from
4639 * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
4640 * helper enforces the key must be an task_struct and the map must also
4641 * be a **BPF_MAP_TYPE_TASK_STORAGE**.
4643 * Underneath, the value is stored locally at *task* instead of
4644 * the *map*. The *map* is used as the bpf-local-storage
4645 * "type". The bpf-local-storage "type" (i.e. the *map*) is
4646 * searched against all bpf_local_storage residing at *task*.
4648 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4649 * used such that a new bpf_local_storage will be
4650 * created if one does not exist. *value* can be used
4651 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4652 * the initial value of a bpf_local_storage. If *value* is
4653 * **NULL**, the new bpf_local_storage will be zero initialized.
4655 * A bpf_local_storage pointer is returned on success.
4657 * **NULL** if not found or there was an error in adding
4658 * a new bpf_local_storage.
4660 * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
4662 * Delete a bpf_local_storage from a *task*.
4666 * **-ENOENT** if the bpf_local_storage cannot be found.
4668 * struct task_struct *bpf_get_current_task_btf(void)
4670 * Return a BTF pointer to the "current" task.
4671 * This pointer can also be used in helpers that accept an
4672 * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
4674 * Pointer to the current task.
4676 * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
4678 * Set or clear certain options on *bprm*:
4680 * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
4681 * which sets the **AT_SECURE** auxv for glibc. The bit
4682 * is cleared if the flag is not specified.
4684 * **-EINVAL** if invalid *flags* are passed, zero otherwise.
4686 * u64 bpf_ktime_get_coarse_ns(void)
4688 * Return a coarse-grained version of the time elapsed since
4689 * system boot, in nanoseconds. Does not include time the system
4692 * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
4696 * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
4698 * Returns the stored IMA hash of the *inode* (if it's avaialable).
4699 * If the hash is larger than *size*, then only *size*
4700 * bytes will be copied to *dst*
4702 * The **hash_algo** is returned on success,
4703 * **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
4704 * invalid arguments are passed.
4706 * struct socket *bpf_sock_from_file(struct file *file)
4708 * If the given file represents a socket, returns the associated
4711 * A pointer to a struct socket on success or NULL if the file is
4714 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
4716 * Check packet size against exceeding MTU of net device (based
4717 * on *ifindex*). This helper will likely be used in combination
4718 * with helpers that adjust/change the packet size.
4720 * The argument *len_diff* can be used for querying with a planned
4721 * size change. This allows to check MTU prior to changing packet
4722 * ctx. Providing an *len_diff* adjustment that is larger than the
4723 * actual packet size (resulting in negative packet size) will in
4724 * principle not exceed the MTU, why it is not considered a
4725 * failure. Other BPF-helpers are needed for performing the
4726 * planned size change, why the responsability for catch a negative
4727 * packet size belong in those helpers.
4729 * Specifying *ifindex* zero means the MTU check is performed
4730 * against the current net device. This is practical if this isn't
4731 * used prior to redirect.
4733 * On input *mtu_len* must be a valid pointer, else verifier will
4734 * reject BPF program. If the value *mtu_len* is initialized to
4735 * zero then the ctx packet size is use. When value *mtu_len* is
4736 * provided as input this specify the L3 length that the MTU check
4737 * is done against. Remember XDP and TC length operate at L2, but
4738 * this value is L3 as this correlate to MTU and IP-header tot_len
4739 * values which are L3 (similar behavior as bpf_fib_lookup).
4741 * The Linux kernel route table can configure MTUs on a more
4742 * specific per route level, which is not provided by this helper.
4743 * For route level MTU checks use the **bpf_fib_lookup**\ ()
4746 * *ctx* is either **struct xdp_md** for XDP programs or
4747 * **struct sk_buff** for tc cls_act programs.
4749 * The *flags* argument can be a combination of one or more of the
4752 * **BPF_MTU_CHK_SEGS**
4753 * This flag will only works for *ctx* **struct sk_buff**.
4754 * If packet context contains extra packet segment buffers
4755 * (often knows as GSO skb), then MTU check is harder to
4756 * check at this point, because in transmit path it is
4757 * possible for the skb packet to get re-segmented
4758 * (depending on net device features). This could still be
4759 * a MTU violation, so this flag enables performing MTU
4760 * check against segments, with a different violation
4761 * return code to tell it apart. Check cannot use len_diff.
4763 * On return *mtu_len* pointer contains the MTU value of the net
4764 * device. Remember the net device configured MTU is the L3 size,
4765 * which is returned here and XDP and TC length operate at L2.
4766 * Helper take this into account for you, but remember when using
4767 * MTU value in your BPF-code.
4770 * * 0 on success, and populate MTU value in *mtu_len* pointer.
4772 * * < 0 if any input argument is invalid (*mtu_len* not updated)
4774 * MTU violations return positive values, but also populate MTU
4775 * value in *mtu_len* pointer, as this can be needed for
4776 * implementing PMTU handing:
4778 * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
4779 * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
4781 * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
4783 * For each element in **map**, call **callback_fn** function with
4784 * **map**, **callback_ctx** and other map-specific parameters.
4785 * The **callback_fn** should be a static function and
4786 * the **callback_ctx** should be a pointer to the stack.
4787 * The **flags** is used to control certain aspects of the helper.
4788 * Currently, the **flags** must be 0.
4790 * The following are a list of supported map types and their
4791 * respective expected callback signatures:
4793 * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
4794 * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
4795 * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
4797 * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
4799 * For per_cpu maps, the map_value is the value on the cpu where the
4800 * bpf_prog is running.
4802 * If **callback_fn** return 0, the helper will continue to the next
4803 * element. If return value is 1, the helper will skip the rest of
4804 * elements and return. Other return values are not used now.
4807 * The number of traversed map elements for success, **-EINVAL** for
4808 * invalid **flags**.
4810 * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
4812 * Outputs a string into the **str** buffer of size **str_size**
4813 * based on a format string stored in a read-only map pointed by
4816 * Each format specifier in **fmt** corresponds to one u64 element
4817 * in the **data** array. For strings and pointers where pointees
4818 * are accessed, only the pointer values are stored in the *data*
4819 * array. The *data_len* is the size of *data* in bytes - must be
4822 * Formats **%s** and **%p{i,I}{4,6}** require to read kernel
4823 * memory. Reading kernel memory may fail due to either invalid
4824 * address or valid address but requiring a major memory fault. If
4825 * reading kernel memory fails, the string for **%s** will be an
4826 * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
4827 * Not returning error to bpf program is consistent with what
4828 * **bpf_trace_printk**\ () does for now.
4831 * The strictly positive length of the formatted string, including
4832 * the trailing zero character. If the return value is greater than
4833 * **str_size**, **str** contains a truncated string, guaranteed to
4834 * be zero-terminated except when **str_size** is 0.
4836 * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
4838 * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
4840 * Execute bpf syscall with given arguments.
4844 * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
4846 * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
4848 * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
4850 * long bpf_sys_close(u32 fd)
4852 * Execute close syscall for given FD.
4856 * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
4858 * Initialize the timer.
4859 * First 4 bits of *flags* specify clockid.
4860 * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
4861 * All other bits of *flags* are reserved.
4862 * The verifier will reject the program if *timer* is not from
4866 * **-EBUSY** if *timer* is already initialized.
4867 * **-EINVAL** if invalid *flags* are passed.
4868 * **-EPERM** if *timer* is in a map that doesn't have any user references.
4869 * The user space should either hold a file descriptor to a map with timers
4870 * or pin such map in bpffs. When map is unpinned or file descriptor is
4871 * closed all timers in the map will be cancelled and freed.
4873 * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
4875 * Configure the timer to call *callback_fn* static function.
4878 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
4879 * **-EPERM** if *timer* is in a map that doesn't have any user references.
4880 * The user space should either hold a file descriptor to a map with timers
4881 * or pin such map in bpffs. When map is unpinned or file descriptor is
4882 * closed all timers in the map will be cancelled and freed.
4884 * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
4886 * Set timer expiration N nanoseconds from the current time. The
4887 * configured callback will be invoked in soft irq context on some cpu
4888 * and will not repeat unless another bpf_timer_start() is made.
4889 * In such case the next invocation can migrate to a different cpu.
4890 * Since struct bpf_timer is a field inside map element the map
4891 * owns the timer. The bpf_timer_set_callback() will increment refcnt
4892 * of BPF program to make sure that callback_fn code stays valid.
4893 * When user space reference to a map reaches zero all timers
4894 * in a map are cancelled and corresponding program's refcnts are
4895 * decremented. This is done to make sure that Ctrl-C of a user
4896 * process doesn't leave any timers running. If map is pinned in
4897 * bpffs the callback_fn can re-arm itself indefinitely.
4898 * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
4899 * cancel and free the timer in the given map element.
4900 * The map can contain timers that invoke callback_fn-s from different
4901 * programs. The same callback_fn can serve different timers from
4902 * different maps if key/value layout matches across maps.
4903 * Every bpf_timer_set_callback() can have different callback_fn.
4907 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
4908 * or invalid *flags* are passed.
4910 * long bpf_timer_cancel(struct bpf_timer *timer)
4912 * Cancel the timer and wait for callback_fn to finish if it was running.
4914 * 0 if the timer was not active.
4915 * 1 if the timer was active.
4916 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
4917 * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
4918 * own timer which would have led to a deadlock otherwise.
4920 * u64 bpf_get_func_ip(void *ctx)
4922 * Get address of the traced function (for tracing and kprobe programs).
4924 * Address of the traced function.
4926 * u64 bpf_get_attach_cookie(void *ctx)
4928 * Get bpf_cookie value provided (optionally) during the program
4929 * attachment. It might be different for each individual
4930 * attachment, even if BPF program itself is the same.
4931 * Expects BPF program context *ctx* as a first argument.
4933 * Supported for the following program types:
4938 * Value specified by user at BPF link creation/attachment time
4939 * or 0, if it was not specified.
4941 * long bpf_task_pt_regs(struct task_struct *task)
4943 * Get the struct pt_regs associated with **task**.
4945 * A pointer to struct pt_regs.
4947 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
4949 * Get branch trace from hardware engines like Intel LBR. The
4950 * hardware engine is stopped shortly after the helper is
4951 * called. Therefore, the user need to filter branch entries
4952 * based on the actual use case. To capture branch trace
4953 * before the trigger point of the BPF program, the helper
4954 * should be called at the beginning of the BPF program.
4956 * The data is stored as struct perf_branch_entry into output
4957 * buffer *entries*. *size* is the size of *entries* in bytes.
4958 * *flags* is reserved for now and must be zero.
4961 * On success, number of bytes written to *buf*. On error, a
4964 * **-EINVAL** if *flags* is not zero.
4966 * **-ENOENT** if architecture does not support branch records.
4968 * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
4970 * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
4971 * to format and can handle more format args as a result.
4973 * Arguments are to be used as in **bpf_seq_printf**\ () helper.
4975 * The number of bytes written to the buffer, or a negative error
4976 * in case of failure.
4978 * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
4980 * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
4982 * *sk* if casting is valid, or **NULL** otherwise.
4984 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
4986 * Get the address of a kernel symbol, returned in *res*. *res* is
4987 * set to 0 if the symbol is not found.
4989 * On success, zero. On error, a negative value.
4991 * **-EINVAL** if *flags* is not zero.
4993 * **-EINVAL** if string *name* is not the same size as *name_sz*.
4995 * **-ENOENT** if symbol is not found.
4997 * **-EPERM** if caller does not have permission to obtain kernel address.
4999 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
5001 * Find vma of *task* that contains *addr*, call *callback_fn*
5002 * function with *task*, *vma*, and *callback_ctx*.
5003 * The *callback_fn* should be a static function and
5004 * the *callback_ctx* should be a pointer to the stack.
5005 * The *flags* is used to control certain aspects of the helper.
5006 * Currently, the *flags* must be 0.
5008 * The expected callback signature is
5010 * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
5014 * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
5015 * **-EBUSY** if failed to try lock mmap_lock.
5016 * **-EINVAL** for invalid **flags**.
5018 * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
5020 * For **nr_loops**, call **callback_fn** function
5021 * with **callback_ctx** as the context parameter.
5022 * The **callback_fn** should be a static function and
5023 * the **callback_ctx** should be a pointer to the stack.
5024 * The **flags** is used to control certain aspects of the helper.
5025 * Currently, the **flags** must be 0. Currently, nr_loops is
5026 * limited to 1 << 23 (~8 million) loops.
5028 * long (\*callback_fn)(u32 index, void \*ctx);
5030 * where **index** is the current index in the loop. The index
5033 * If **callback_fn** returns 0, the helper will continue to the next
5034 * loop. If return value is 1, the helper will skip the rest of
5035 * the loops and return. Other return values are not used now,
5036 * and will be rejected by the verifier.
5039 * The number of loops performed, **-EINVAL** for invalid **flags**,
5040 * **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
5042 * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
5044 * Do strncmp() between **s1** and **s2**. **s1** doesn't need
5045 * to be null-terminated and **s1_sz** is the maximum storage
5046 * size of **s1**. **s2** must be a read-only string.
5048 * An integer less than, equal to, or greater than zero
5049 * if the first **s1_sz** bytes of **s1** is found to be
5050 * less than, to match, or be greater than **s2**.
5052 * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
5054 * Get **n**-th argument (zero based) of the traced function (for tracing programs)
5055 * returned in **value**.
5059 * **-EINVAL** if n >= arguments count of traced function.
5061 * long bpf_get_func_ret(void *ctx, u64 *value)
5063 * Get return value of the traced function (for tracing programs)
5068 * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
5070 * long bpf_get_func_arg_cnt(void *ctx)
5072 * Get number of arguments of the traced function (for tracing programs).
5075 * The number of arguments of the traced function.
5077 * int bpf_get_retval(void)
5079 * Get the syscall's return value that will be returned to userspace.
5081 * This helper is currently supported by cgroup programs only.
5083 * The syscall's return value.
5085 * int bpf_set_retval(int retval)
5087 * Set the syscall's return value that will be returned to userspace.
5089 * This helper is currently supported by cgroup programs only.
5091 * 0 on success, or a negative error in case of failure.
5093 * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
5095 * Get the total size of a given xdp buff (linear and paged area)
5097 * The total size of a given xdp buffer.
5099 * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5101 * This helper is provided as an easy way to load data from a
5102 * xdp buffer. It can be used to load *len* bytes from *offset* from
5103 * the frame associated to *xdp_md*, into the buffer pointed by
5106 * 0 on success, or a negative error in case of failure.
5108 * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5110 * Store *len* bytes from buffer *buf* into the frame
5111 * associated to *xdp_md*, at *offset*.
5113 * 0 on success, or a negative error in case of failure.
5115 * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
5117 * Read *size* bytes from user space address *user_ptr* in *tsk*'s
5118 * address space, and stores the data in *dst*. *flags* is not
5119 * used yet and is provided for future extensibility. This helper
5120 * can only be used by sleepable programs.
5122 * 0 on success, or a negative error in case of failure. On error
5123 * *dst* buffer is zeroed out.
5125 * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
5127 * Change the __sk_buff->tstamp_type to *tstamp_type*
5128 * and set *tstamp* to the __sk_buff->tstamp together.
5130 * If there is no need to change the __sk_buff->tstamp_type,
5131 * the tstamp value can be directly written to __sk_buff->tstamp
5134 * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
5135 * will be kept during bpf_redirect_*(). A non zero
5136 * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
5139 * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
5140 * with a zero *tstamp*.
5142 * Only IPv4 and IPv6 skb->protocol are supported.
5144 * This function is most useful when it needs to set a
5145 * mono delivery time to __sk_buff->tstamp and then
5146 * bpf_redirect_*() to the egress of an iface. For example,
5147 * changing the (rcv) timestamp in __sk_buff->tstamp at
5148 * ingress to a mono delivery time and then bpf_redirect_*()
5149 * to sch_fq@phy-dev.
5152 * **-EINVAL** for invalid input
5153 * **-EOPNOTSUPP** for unsupported protocol
5155 * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
5157 * Returns a calculated IMA hash of the *file*.
5158 * If the hash is larger than *size*, then only *size*
5159 * bytes will be copied to *dst*
5161 * The **hash_algo** is returned on success,
5162 * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
5163 * invalid arguments are passed.
5165 * void *bpf_kptr_xchg(void *map_value, void *ptr)
5167 * Exchange kptr at pointer *map_value* with *ptr*, and return the
5168 * old value. *ptr* can be NULL, otherwise it must be a referenced
5169 * pointer which will be released when this helper is called.
5171 * The old value of kptr (which can be NULL). The returned pointer
5172 * if not NULL, is a reference which must be released using its
5173 * corresponding release function, or moved into a BPF map before
5176 * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
5178 * Perform a lookup in *percpu map* for an entry associated to
5181 * Map value associated to *key* on *cpu*, or **NULL** if no entry
5182 * was found or *cpu* is invalid.
5184 * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
5186 * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
5188 * *sk* if casting is valid, or **NULL** otherwise.
5190 * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
5192 * Get a dynptr to local memory *data*.
5194 * *data* must be a ptr to a map value.
5195 * The maximum *size* supported is DYNPTR_MAX_SIZE.
5196 * *flags* is currently unused.
5198 * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
5199 * -EINVAL if flags is not 0.
5201 * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
5203 * Reserve *size* bytes of payload in a ring buffer *ringbuf*
5204 * through the dynptr interface. *flags* must be 0.
5206 * Please note that a corresponding bpf_ringbuf_submit_dynptr or
5207 * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
5208 * reservation fails. This is enforced by the verifier.
5210 * 0 on success, or a negative error in case of failure.
5212 * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
5214 * Submit reserved ring buffer sample, pointed to by *data*,
5215 * through the dynptr interface. This is a no-op if the dynptr is
5218 * For more information on *flags*, please see
5219 * 'bpf_ringbuf_submit'.
5221 * Nothing. Always succeeds.
5223 * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
5225 * Discard reserved ring buffer sample through the dynptr
5226 * interface. This is a no-op if the dynptr is invalid/null.
5228 * For more information on *flags*, please see
5229 * 'bpf_ringbuf_discard'.
5231 * Nothing. Always succeeds.
5233 * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
5235 * Read *len* bytes from *src* into *dst*, starting from *offset*
5237 * *flags* is currently unused.
5239 * 0 on success, -E2BIG if *offset* + *len* exceeds the length
5240 * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
5243 * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
5245 * Write *len* bytes from *src* into *dst*, starting from *offset*
5247 * *flags* is currently unused.
5249 * 0 on success, -E2BIG if *offset* + *len* exceeds the length
5250 * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
5251 * is a read-only dynptr or if *flags* is not 0.
5253 * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
5255 * Get a pointer to the underlying dynptr data.
5257 * *len* must be a statically known value. The returned data slice
5258 * is invalidated whenever the dynptr is invalidated.
5260 * Pointer to the underlying dynptr data, NULL if the dynptr is
5261 * read-only, if the dynptr is invalid, or if the offset and length
5264 * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
5266 * Try to issue a SYN cookie for the packet with corresponding
5267 * IPv4/TCP headers, *iph* and *th*, without depending on a
5270 * *iph* points to the IPv4 header.
5272 * *th* points to the start of the TCP header, while *th_len*
5273 * contains the length of the TCP header (at least
5274 * **sizeof**\ (**struct tcphdr**)).
5276 * On success, lower 32 bits hold the generated SYN cookie in
5277 * followed by 16 bits which hold the MSS value for that cookie,
5278 * and the top 16 bits are unused.
5280 * On failure, the returned value is one of the following:
5282 * **-EINVAL** if *th_len* is invalid.
5284 * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
5286 * Try to issue a SYN cookie for the packet with corresponding
5287 * IPv6/TCP headers, *iph* and *th*, without depending on a
5290 * *iph* points to the IPv6 header.
5292 * *th* points to the start of the TCP header, while *th_len*
5293 * contains the length of the TCP header (at least
5294 * **sizeof**\ (**struct tcphdr**)).
5296 * On success, lower 32 bits hold the generated SYN cookie in
5297 * followed by 16 bits which hold the MSS value for that cookie,
5298 * and the top 16 bits are unused.
5300 * On failure, the returned value is one of the following:
5302 * **-EINVAL** if *th_len* is invalid.
5304 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5306 * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
5308 * Check whether *iph* and *th* contain a valid SYN cookie ACK
5309 * without depending on a listening socket.
5311 * *iph* points to the IPv4 header.
5313 * *th* points to the TCP header.
5315 * 0 if *iph* and *th* are a valid SYN cookie ACK.
5317 * On failure, the returned value is one of the following:
5319 * **-EACCES** if the SYN cookie is not valid.
5321 * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
5323 * Check whether *iph* and *th* contain a valid SYN cookie ACK
5324 * without depending on a listening socket.
5326 * *iph* points to the IPv6 header.
5328 * *th* points to the TCP header.
5330 * 0 if *iph* and *th* are a valid SYN cookie ACK.
5332 * On failure, the returned value is one of the following:
5334 * **-EACCES** if the SYN cookie is not valid.
5336 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5338 #define __BPF_FUNC_MAPPER(FN) \
5340 FN(map_lookup_elem), \
5341 FN(map_update_elem), \
5342 FN(map_delete_elem), \
5346 FN(get_prandom_u32), \
5347 FN(get_smp_processor_id), \
5348 FN(skb_store_bytes), \
5349 FN(l3_csum_replace), \
5350 FN(l4_csum_replace), \
5352 FN(clone_redirect), \
5353 FN(get_current_pid_tgid), \
5354 FN(get_current_uid_gid), \
5355 FN(get_current_comm), \
5356 FN(get_cgroup_classid), \
5357 FN(skb_vlan_push), \
5359 FN(skb_get_tunnel_key), \
5360 FN(skb_set_tunnel_key), \
5361 FN(perf_event_read), \
5363 FN(get_route_realm), \
5364 FN(perf_event_output), \
5365 FN(skb_load_bytes), \
5368 FN(skb_get_tunnel_opt), \
5369 FN(skb_set_tunnel_opt), \
5370 FN(skb_change_proto), \
5371 FN(skb_change_type), \
5372 FN(skb_under_cgroup), \
5373 FN(get_hash_recalc), \
5374 FN(get_current_task), \
5375 FN(probe_write_user), \
5376 FN(current_task_under_cgroup), \
5377 FN(skb_change_tail), \
5378 FN(skb_pull_data), \
5380 FN(set_hash_invalid), \
5381 FN(get_numa_node_id), \
5382 FN(skb_change_head), \
5383 FN(xdp_adjust_head), \
5384 FN(probe_read_str), \
5385 FN(get_socket_cookie), \
5386 FN(get_socket_uid), \
5389 FN(skb_adjust_room), \
5391 FN(sk_redirect_map), \
5392 FN(sock_map_update), \
5393 FN(xdp_adjust_meta), \
5394 FN(perf_event_read_value), \
5395 FN(perf_prog_read_value), \
5397 FN(override_return), \
5398 FN(sock_ops_cb_flags_set), \
5399 FN(msg_redirect_map), \
5400 FN(msg_apply_bytes), \
5401 FN(msg_cork_bytes), \
5402 FN(msg_pull_data), \
5404 FN(xdp_adjust_tail), \
5405 FN(skb_get_xfrm_state), \
5407 FN(skb_load_bytes_relative), \
5409 FN(sock_hash_update), \
5410 FN(msg_redirect_hash), \
5411 FN(sk_redirect_hash), \
5412 FN(lwt_push_encap), \
5413 FN(lwt_seg6_store_bytes), \
5414 FN(lwt_seg6_adjust_srh), \
5415 FN(lwt_seg6_action), \
5418 FN(skb_cgroup_id), \
5419 FN(get_current_cgroup_id), \
5420 FN(get_local_storage), \
5421 FN(sk_select_reuseport), \
5422 FN(skb_ancestor_cgroup_id), \
5423 FN(sk_lookup_tcp), \
5424 FN(sk_lookup_udp), \
5426 FN(map_push_elem), \
5428 FN(map_peek_elem), \
5429 FN(msg_push_data), \
5431 FN(rc_pointer_rel), \
5436 FN(skb_ecn_set_ce), \
5437 FN(get_listener_sock), \
5438 FN(skc_lookup_tcp), \
5439 FN(tcp_check_syncookie), \
5440 FN(sysctl_get_name), \
5441 FN(sysctl_get_current_value), \
5442 FN(sysctl_get_new_value), \
5443 FN(sysctl_set_new_value), \
5446 FN(sk_storage_get), \
5447 FN(sk_storage_delete), \
5449 FN(tcp_gen_syncookie), \
5451 FN(probe_read_user), \
5452 FN(probe_read_kernel), \
5453 FN(probe_read_user_str), \
5454 FN(probe_read_kernel_str), \
5456 FN(send_signal_thread), \
5458 FN(read_branch_records), \
5459 FN(get_ns_current_pid_tgid), \
5461 FN(get_netns_cookie), \
5462 FN(get_current_ancestor_cgroup_id), \
5464 FN(ktime_get_boot_ns), \
5468 FN(sk_ancestor_cgroup_id), \
5469 FN(ringbuf_output), \
5470 FN(ringbuf_reserve), \
5471 FN(ringbuf_submit), \
5472 FN(ringbuf_discard), \
5473 FN(ringbuf_query), \
5475 FN(skc_to_tcp6_sock), \
5476 FN(skc_to_tcp_sock), \
5477 FN(skc_to_tcp_timewait_sock), \
5478 FN(skc_to_tcp_request_sock), \
5479 FN(skc_to_udp6_sock), \
5480 FN(get_task_stack), \
5482 FN(store_hdr_opt), \
5483 FN(reserve_hdr_opt), \
5484 FN(inode_storage_get), \
5485 FN(inode_storage_delete), \
5487 FN(copy_from_user), \
5489 FN(seq_printf_btf), \
5490 FN(skb_cgroup_classid), \
5491 FN(redirect_neigh), \
5494 FN(redirect_peer), \
5495 FN(task_storage_get), \
5496 FN(task_storage_delete), \
5497 FN(get_current_task_btf), \
5498 FN(bprm_opts_set), \
5499 FN(ktime_get_coarse_ns), \
5500 FN(ima_inode_hash), \
5501 FN(sock_from_file), \
5503 FN(for_each_map_elem), \
5506 FN(btf_find_by_name_kind), \
5509 FN(timer_set_callback), \
5513 FN(get_attach_cookie), \
5515 FN(get_branch_snapshot), \
5516 FN(trace_vprintk), \
5517 FN(skc_to_unix_sock), \
5518 FN(kallsyms_lookup_name), \
5524 FN(get_func_arg_cnt), \
5527 FN(xdp_get_buff_len), \
5528 FN(xdp_load_bytes), \
5529 FN(xdp_store_bytes), \
5530 FN(copy_from_user_task), \
5531 FN(skb_set_tstamp), \
5532 FN(ima_file_hash), \
5534 FN(map_lookup_percpu_elem), \
5535 FN(skc_to_mptcp_sock), \
5536 FN(dynptr_from_mem), \
5537 FN(ringbuf_reserve_dynptr), \
5538 FN(ringbuf_submit_dynptr), \
5539 FN(ringbuf_discard_dynptr), \
5543 FN(tcp_raw_gen_syncookie_ipv4), \
5544 FN(tcp_raw_gen_syncookie_ipv6), \
5545 FN(tcp_raw_check_syncookie_ipv4), \
5546 FN(tcp_raw_check_syncookie_ipv6), \
5549 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
5550 * function eBPF program intends to call
5552 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
5554 __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
5557 #undef __BPF_ENUM_FN
5559 /* All flags used by eBPF helper functions, placed here. */
5561 /* BPF_FUNC_skb_store_bytes flags. */
5563 BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
5564 BPF_F_INVALIDATE_HASH = (1ULL << 1),
5567 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
5568 * First 4 bits are for passing the header field size.
5571 BPF_F_HDR_FIELD_MASK = 0xfULL,
5574 /* BPF_FUNC_l4_csum_replace flags. */
5576 BPF_F_PSEUDO_HDR = (1ULL << 4),
5577 BPF_F_MARK_MANGLED_0 = (1ULL << 5),
5578 BPF_F_MARK_ENFORCE = (1ULL << 6),
5581 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
5583 BPF_F_INGRESS = (1ULL << 0),
5586 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
5588 BPF_F_TUNINFO_IPV6 = (1ULL << 0),
5591 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
5593 BPF_F_SKIP_FIELD_MASK = 0xffULL,
5594 BPF_F_USER_STACK = (1ULL << 8),
5595 /* flags used by BPF_FUNC_get_stackid only. */
5596 BPF_F_FAST_STACK_CMP = (1ULL << 9),
5597 BPF_F_REUSE_STACKID = (1ULL << 10),
5598 /* flags used by BPF_FUNC_get_stack only. */
5599 BPF_F_USER_BUILD_ID = (1ULL << 11),
5602 /* BPF_FUNC_skb_set_tunnel_key flags. */
5604 BPF_F_ZERO_CSUM_TX = (1ULL << 1),
5605 BPF_F_DONT_FRAGMENT = (1ULL << 2),
5606 BPF_F_SEQ_NUMBER = (1ULL << 3),
5609 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
5610 * BPF_FUNC_perf_event_read_value flags.
5613 BPF_F_INDEX_MASK = 0xffffffffULL,
5614 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
5615 /* BPF_FUNC_perf_event_output for sk_buff input context. */
5616 BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
5619 /* Current network namespace */
5621 BPF_F_CURRENT_NETNS = (-1L),
5624 /* BPF_FUNC_csum_level level values. */
5626 BPF_CSUM_LEVEL_QUERY,
5629 BPF_CSUM_LEVEL_RESET,
5632 /* BPF_FUNC_skb_adjust_room flags. */
5634 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
5635 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
5636 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
5637 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
5638 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
5639 BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
5640 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
5644 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
5645 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
5648 #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
5649 BPF_ADJ_ROOM_ENCAP_L2_MASK) \
5650 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
5652 /* BPF_FUNC_sysctl_get_name flags. */
5654 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
5657 /* BPF_FUNC_<kernel_obj>_storage_get flags */
5659 BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
5660 /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
5661 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
5663 BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
5666 /* BPF_FUNC_read_branch_records flags. */
5668 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
5671 /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and
5672 * BPF_FUNC_bpf_ringbuf_output flags.
5675 BPF_RB_NO_WAKEUP = (1ULL << 0),
5676 BPF_RB_FORCE_WAKEUP = (1ULL << 1),
5679 /* BPF_FUNC_bpf_ringbuf_query flags */
5681 BPF_RB_AVAIL_DATA = 0,
5682 BPF_RB_RING_SIZE = 1,
5683 BPF_RB_CONS_POS = 2,
5684 BPF_RB_PROD_POS = 3,
5687 /* BPF ring buffer constants */
5689 BPF_RINGBUF_BUSY_BIT = (1U << 31),
5690 BPF_RINGBUF_DISCARD_BIT = (1U << 30),
5691 BPF_RINGBUF_HDR_SZ = 8,
5694 /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
5696 BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0),
5697 BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1),
5700 /* Mode for BPF_FUNC_skb_adjust_room helper. */
5701 enum bpf_adj_room_mode {
5706 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
5707 enum bpf_hdr_start_off {
5712 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
5713 enum bpf_lwt_encap_mode {
5715 BPF_LWT_ENCAP_SEG6_INLINE,
5719 /* Flags for bpf_bprm_opts_set helper */
5721 BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
5724 /* Flags for bpf_redirect_map helper */
5726 BPF_F_BROADCAST = (1ULL << 3),
5727 BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
5730 #define __bpf_md_ptr(type, name) \
5734 } __attribute__((aligned(8)))
5737 BPF_SKB_TSTAMP_UNSPEC,
5738 BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
5739 /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
5740 * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
5741 * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
5745 /* user accessible mirror of in-kernel sk_buff.
5746 * new fields can only be added to the end of this structure
5752 __u32 queue_mapping;
5758 __u32 ingress_ifindex;
5768 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
5770 __u32 remote_ip4; /* Stored in network byte order */
5771 __u32 local_ip4; /* Stored in network byte order */
5772 __u32 remote_ip6[4]; /* Stored in network byte order */
5773 __u32 local_ip6[4]; /* Stored in network byte order */
5774 __u32 remote_port; /* Stored in network byte order */
5775 __u32 local_port; /* stored in host byte order */
5779 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
5783 __bpf_md_ptr(struct bpf_sock *, sk);
5786 __u32 :24; /* Padding, future use. */
5790 struct bpf_tunnel_key {
5794 __u32 remote_ipv6[4];
5798 __u16 tunnel_ext; /* Padding, future use. */
5802 __u32 local_ipv6[4];
5806 /* user accessible mirror of in-kernel xfrm_state.
5807 * new fields can only be added to the end of this structure
5809 struct bpf_xfrm_state {
5811 __u32 spi; /* Stored in network byte order */
5813 __u16 ext; /* Padding, future use. */
5815 __u32 remote_ipv4; /* Stored in network byte order */
5816 __u32 remote_ipv6[4]; /* Stored in network byte order */
5820 /* Generic BPF return codes which all BPF program types may support.
5821 * The values are binary compatible with their TC_ACT_* counter-part to
5822 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
5825 * XDP is handled seprately, see XDP_*.
5833 /* >127 are reserved for prog type specific return codes.
5835 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
5836 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
5837 * changed and should be routed based on its new L3 header.
5838 * (This is an L3 redirect, as opposed to L2 redirect
5839 * represented by BPF_REDIRECT above).
5841 BPF_LWT_REROUTE = 128,
5851 /* IP address also allows 1 and 2 bytes access */
5854 __u32 src_port; /* host byte order */
5855 __be16 dst_port; /* network byte order */
5856 __u16 :16; /* zero padding */
5860 __s32 rx_queue_mapping;
5863 struct bpf_tcp_sock {
5864 __u32 snd_cwnd; /* Sending congestion window */
5865 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
5867 __u32 snd_ssthresh; /* Slow start size threshold */
5868 __u32 rcv_nxt; /* What we want to receive next */
5869 __u32 snd_nxt; /* Next sequence we send */
5870 __u32 snd_una; /* First byte we want an ack for */
5871 __u32 mss_cache; /* Cached effective mss, not including SACKS */
5872 __u32 ecn_flags; /* ECN status bits. */
5873 __u32 rate_delivered; /* saved rate sample: packets delivered */
5874 __u32 rate_interval_us; /* saved rate sample: time elapsed */
5875 __u32 packets_out; /* Packets which are "in flight" */
5876 __u32 retrans_out; /* Retransmitted packets out */
5877 __u32 total_retrans; /* Total retransmits for entire connection */
5878 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
5879 * total number of segments in.
5881 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
5882 * total number of data segments in.
5884 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
5885 * The total number of segments sent.
5887 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
5888 * total number of data segments sent.
5890 __u32 lost_out; /* Lost packets */
5891 __u32 sacked_out; /* SACK'd packets */
5892 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
5893 * sum(delta(rcv_nxt)), or how many bytes
5896 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
5897 * sum(delta(snd_una)), or how many bytes
5900 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
5901 * total number of DSACK blocks received
5903 __u32 delivered; /* Total data packets delivered incl. rexmits */
5904 __u32 delivered_ce; /* Like the above but only ECE marked packets */
5905 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
5908 struct bpf_sock_tuple {
5925 struct bpf_xdp_sock {
5929 #define XDP_PACKET_HEADROOM 256
5931 /* User return codes for XDP prog type.
5932 * A valid XDP program must return one of these defined values. All other
5933 * return codes are reserved for future use. Unknown return codes will
5934 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
5944 /* user accessible metadata for XDP packet hook
5945 * new fields must be added to the end of this structure
5951 /* Below access go through struct xdp_rxq_info */
5952 __u32 ingress_ifindex; /* rxq->dev->ifindex */
5953 __u32 rx_queue_index; /* rxq->queue_index */
5955 __u32 egress_ifindex; /* txq->dev->ifindex */
5958 /* DEVMAP map-value layout
5960 * The struct data-layout of map-value is a configuration interface.
5961 * New members can only be added to the end of this structure.
5963 struct bpf_devmap_val {
5964 __u32 ifindex; /* device index */
5966 int fd; /* prog fd on map write */
5967 __u32 id; /* prog id on map read */
5971 /* CPUMAP map-value layout
5973 * The struct data-layout of map-value is a configuration interface.
5974 * New members can only be added to the end of this structure.
5976 struct bpf_cpumap_val {
5977 __u32 qsize; /* queue size to remote target CPU */
5979 int fd; /* prog fd on map write */
5980 __u32 id; /* prog id on map read */
5989 /* user accessible metadata for SK_MSG packet hook, new fields must
5990 * be added to the end of this structure
5993 __bpf_md_ptr(void *, data);
5994 __bpf_md_ptr(void *, data_end);
5997 __u32 remote_ip4; /* Stored in network byte order */
5998 __u32 local_ip4; /* Stored in network byte order */
5999 __u32 remote_ip6[4]; /* Stored in network byte order */
6000 __u32 local_ip6[4]; /* Stored in network byte order */
6001 __u32 remote_port; /* Stored in network byte order */
6002 __u32 local_port; /* stored in host byte order */
6003 __u32 size; /* Total size of sk_msg */
6005 __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
6008 struct sk_reuseport_md {
6010 * Start of directly accessible data. It begins from
6011 * the tcp/udp header.
6013 __bpf_md_ptr(void *, data);
6014 /* End of directly accessible data */
6015 __bpf_md_ptr(void *, data_end);
6017 * Total length of packet (starting from the tcp/udp header).
6018 * Note that the directly accessible bytes (data_end - data)
6019 * could be less than this "len". Those bytes could be
6020 * indirectly read by a helper "bpf_skb_load_bytes()".
6024 * Eth protocol in the mac header (network byte order). e.g.
6025 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
6028 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
6029 __u32 bind_inany; /* Is sock bound to an INANY address? */
6030 __u32 hash; /* A hash of the packet 4 tuples */
6031 /* When reuse->migrating_sk is NULL, it is selecting a sk for the
6032 * new incoming connection request (e.g. selecting a listen sk for
6033 * the received SYN in the TCP case). reuse->sk is one of the sk
6034 * in the reuseport group. The bpf prog can use reuse->sk to learn
6035 * the local listening ip/port without looking into the skb.
6037 * When reuse->migrating_sk is not NULL, reuse->sk is closed and
6038 * reuse->migrating_sk is the socket that needs to be migrated
6039 * to another listening socket. migrating_sk could be a fullsock
6040 * sk that is fully established or a reqsk that is in-the-middle
6041 * of 3-way handshake.
6043 __bpf_md_ptr(struct bpf_sock *, sk);
6044 __bpf_md_ptr(struct bpf_sock *, migrating_sk);
6047 #define BPF_TAG_SIZE 8
6049 struct bpf_prog_info {
6052 __u8 tag[BPF_TAG_SIZE];
6053 __u32 jited_prog_len;
6054 __u32 xlated_prog_len;
6055 __aligned_u64 jited_prog_insns;
6056 __aligned_u64 xlated_prog_insns;
6057 __u64 load_time; /* ns since boottime */
6058 __u32 created_by_uid;
6060 __aligned_u64 map_ids;
6061 char name[BPF_OBJ_NAME_LEN];
6063 __u32 gpl_compatible:1;
6064 __u32 :31; /* alignment pad */
6067 __u32 nr_jited_ksyms;
6068 __u32 nr_jited_func_lens;
6069 __aligned_u64 jited_ksyms;
6070 __aligned_u64 jited_func_lens;
6072 __u32 func_info_rec_size;
6073 __aligned_u64 func_info;
6076 __aligned_u64 line_info;
6077 __aligned_u64 jited_line_info;
6078 __u32 nr_jited_line_info;
6079 __u32 line_info_rec_size;
6080 __u32 jited_line_info_rec_size;
6082 __aligned_u64 prog_tags;
6085 __u64 recursion_misses;
6086 __u32 verified_insns;
6087 __u32 attach_btf_obj_id;
6088 __u32 attach_btf_id;
6089 } __attribute__((aligned(8)));
6091 struct bpf_map_info {
6098 char name[BPF_OBJ_NAME_LEN];
6100 __u32 btf_vmlinux_value_type_id;
6104 __u32 btf_key_type_id;
6105 __u32 btf_value_type_id;
6106 __u32 :32; /* alignment pad */
6108 } __attribute__((aligned(8)));
6110 struct bpf_btf_info {
6117 } __attribute__((aligned(8)));
6119 struct bpf_link_info {
6125 __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
6126 __u32 tp_name_len; /* in/out: tp_name buffer len */
6130 __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
6131 __u32 target_btf_id; /* BTF type id inside the object */
6138 __aligned_u64 target_name; /* in/out: target_name buffer ptr */
6139 __u32 target_name_len; /* in/out: target_name buffer len */
6154 } __attribute__((aligned(8)));
6156 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
6157 * by user and intended to be used by socket (e.g. to bind to, depends on
6160 struct bpf_sock_addr {
6161 __u32 user_family; /* Allows 4-byte read, but no write. */
6162 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
6163 * Stored in network byte order.
6165 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
6166 * Stored in network byte order.
6168 __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write.
6169 * Stored in network byte order
6171 __u32 family; /* Allows 4-byte read, but no write */
6172 __u32 type; /* Allows 4-byte read, but no write */
6173 __u32 protocol; /* Allows 4-byte read, but no write */
6174 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
6175 * Stored in network byte order.
6177 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
6178 * Stored in network byte order.
6180 __bpf_md_ptr(struct bpf_sock *, sk);
6183 /* User bpf_sock_ops struct to access socket values and specify request ops
6184 * and their replies.
6185 * Some of this fields are in network (bigendian) byte order and may need
6186 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
6187 * New fields can only be added at the end of this structure
6189 struct bpf_sock_ops {
6192 __u32 args[4]; /* Optionally passed to bpf program */
6193 __u32 reply; /* Returned by bpf program */
6194 __u32 replylong[4]; /* Optionally returned by bpf prog */
6197 __u32 remote_ip4; /* Stored in network byte order */
6198 __u32 local_ip4; /* Stored in network byte order */
6199 __u32 remote_ip6[4]; /* Stored in network byte order */
6200 __u32 local_ip6[4]; /* Stored in network byte order */
6201 __u32 remote_port; /* Stored in network byte order */
6202 __u32 local_port; /* stored in host byte order */
6203 __u32 is_fullsock; /* Some TCP fields are only valid if
6204 * there is a full socket. If not, the
6205 * fields read as zero.
6208 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
6209 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
6218 __u32 rate_delivered;
6219 __u32 rate_interval_us;
6222 __u32 total_retrans;
6226 __u32 data_segs_out;
6230 __u64 bytes_received;
6232 __bpf_md_ptr(struct bpf_sock *, sk);
6233 /* [skb_data, skb_data_end) covers the whole TCP header.
6235 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
6236 * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
6237 * header has not been written.
6238 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
6239 * been written so far.
6240 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
6242 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
6245 * bpf_load_hdr_opt() can also be used to read a particular option.
6247 __bpf_md_ptr(void *, skb_data);
6248 __bpf_md_ptr(void *, skb_data_end);
6249 __u32 skb_len; /* The total length of a packet.
6250 * It includes the header, options,
6253 __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
6254 * an easy way to check for tcp_flags
6255 * without parsing skb_data.
6257 * In particular, the skb_tcp_flags
6258 * will still be available in
6259 * BPF_SOCK_OPS_HDR_OPT_LEN even though
6260 * the outgoing header has not
6265 /* Definitions for bpf_sock_ops_cb_flags */
6267 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
6268 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
6269 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
6270 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
6271 /* Call bpf for all received TCP headers. The bpf prog will be
6272 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6274 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6275 * for the header option related helpers that will be useful
6276 * to the bpf programs.
6278 * It could be used at the client/active side (i.e. connect() side)
6279 * when the server told it that the server was in syncookie
6280 * mode and required the active side to resend the bpf-written
6281 * options. The active side can keep writing the bpf-options until
6282 * it received a valid packet from the server side to confirm
6283 * the earlier packet (and options) has been received. The later
6284 * example patch is using it like this at the active side when the
6285 * server is in syncookie mode.
6287 * The bpf prog will usually turn this off in the common cases.
6289 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
6290 /* Call bpf when kernel has received a header option that
6291 * the kernel cannot handle. The bpf prog will be called under
6292 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
6294 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6295 * for the header option related helpers that will be useful
6296 * to the bpf programs.
6298 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
6299 /* Call bpf when the kernel is writing header options for the
6300 * outgoing packet. The bpf prog will first be called
6301 * to reserve space in a skb under
6302 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
6303 * the bpf prog will be called to write the header option(s)
6304 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6306 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
6307 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
6308 * related helpers that will be useful to the bpf programs.
6310 * The kernel gets its chance to reserve space and write
6311 * options first before the BPF program does.
6313 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
6314 /* Mask of all currently supported cb flags */
6315 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
6318 /* List of known BPF sock_ops operators.
6319 * New entries can only be added at the end
6323 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
6324 * -1 if default value should be used
6326 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized
6327 * window (in packets) or -1 if default
6328 * value should be used
6330 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
6331 * active connection is initialized
6333 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
6334 * active connection is
6337 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
6338 * passive connection is
6341 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
6344 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
6345 * based on the path and may be
6346 * dependent on the congestion control
6347 * algorithm. In general it indicates
6348 * a congestion threshold. RTTs above
6349 * this indicate congestion
6351 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
6352 * Arg1: value of icsk_retransmits
6353 * Arg2: value of icsk_rto
6354 * Arg3: whether RTO has expired
6356 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
6357 * Arg1: sequence number of 1st byte
6359 * Arg3: return value of
6360 * tcp_transmit_skb (0 => success)
6362 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
6366 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
6367 * socket transition to LISTEN state.
6369 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
6371 BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
6372 * It will be called to handle
6373 * the packets received at
6374 * an already established
6377 * sock_ops->skb_data:
6378 * Referring to the received skb.
6379 * It covers the TCP header only.
6381 * bpf_load_hdr_opt() can also
6382 * be used to search for a
6383 * particular option.
6385 BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
6386 * header option later in
6387 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6388 * Arg1: bool want_cookie. (in
6389 * writing SYNACK only)
6391 * sock_ops->skb_data:
6392 * Not available because no header has
6395 * sock_ops->skb_tcp_flags:
6396 * The tcp_flags of the
6397 * outgoing skb. (e.g. SYN, ACK, FIN).
6399 * bpf_reserve_hdr_opt() should
6400 * be used to reserve space.
6402 BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
6403 * Arg1: bool want_cookie. (in
6404 * writing SYNACK only)
6406 * sock_ops->skb_data:
6407 * Referring to the outgoing skb.
6408 * It covers the TCP header
6409 * that has already been written
6410 * by the kernel and the
6411 * earlier bpf-progs.
6413 * sock_ops->skb_tcp_flags:
6414 * The tcp_flags of the outgoing
6415 * skb. (e.g. SYN, ACK, FIN).
6417 * bpf_store_hdr_opt() should
6418 * be used to write the
6421 * bpf_load_hdr_opt() can also
6422 * be used to search for a
6423 * particular option that
6424 * has already been written
6425 * by the kernel or the
6426 * earlier bpf-progs.
6430 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
6431 * changes between the TCP and BPF versions. Ideally this should never happen.
6432 * If it does, we need to add code to convert them before calling
6433 * the BPF sock_ops function.
6436 BPF_TCP_ESTABLISHED = 1,
6446 BPF_TCP_CLOSING, /* Now a valid state */
6447 BPF_TCP_NEW_SYN_RECV,
6449 BPF_TCP_MAX_STATES /* Leave at the end! */
6453 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
6454 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
6455 TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
6456 TCP_BPF_RTO_MIN = 1004, /* Min delay ack in usecs */
6457 /* Copy the SYN pkt to optval
6459 * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the
6460 * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit
6461 * to only getting from the saved_syn. It can either get the
6464 * 1. the just-received SYN packet (only available when writing the
6465 * SYNACK). It will be useful when it is not necessary to
6466 * save the SYN packet for latter use. It is also the only way
6467 * to get the SYN during syncookie mode because the syn
6468 * packet cannot be saved during syncookie.
6472 * 2. the earlier saved syn which was done by
6473 * bpf_setsockopt(TCP_SAVE_SYN).
6475 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
6476 * SYN packet is obtained.
6478 * If the bpf-prog does not need the IP[46] header, the
6479 * bpf-prog can avoid parsing the IP header by using
6480 * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
6481 * IP[46] and TCP header by using TCP_BPF_SYN_IP.
6483 * >0: Total number of bytes copied
6484 * -ENOSPC: Not enough space in optval. Only optlen number of
6486 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
6487 * is not saved by setsockopt(TCP_SAVE_SYN).
6489 TCP_BPF_SYN = 1005, /* Copy the TCP header */
6490 TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
6491 TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
6495 BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
6498 /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
6499 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6502 BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
6503 * total option spaces
6504 * required for an established
6505 * sk in order to calculate the
6506 * MSS. No skb is actually
6509 BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
6510 * when sending a SYN.
6514 struct bpf_perf_event_value {
6521 BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
6522 BPF_DEVCG_ACC_READ = (1ULL << 1),
6523 BPF_DEVCG_ACC_WRITE = (1ULL << 2),
6527 BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
6528 BPF_DEVCG_DEV_CHAR = (1ULL << 1),
6531 struct bpf_cgroup_dev_ctx {
6532 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
6538 struct bpf_raw_tracepoint_args {
6542 /* DIRECT: Skip the FIB rules and go to FIB table associated with device
6543 * OUTPUT: Do lookup from egress perspective; default is ingress
6546 BPF_FIB_LOOKUP_DIRECT = (1U << 0),
6547 BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
6551 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
6552 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
6553 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
6554 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
6555 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
6556 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
6557 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
6558 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
6559 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
6562 struct bpf_fib_lookup {
6563 /* input: network family for lookup (AF_INET, AF_INET6)
6564 * output: network family of egress nexthop
6568 /* set if lookup is to consider L4 data - e.g., FIB rules */
6573 union { /* used for MTU check */
6574 /* input to lookup */
6575 __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */
6577 /* output: MTU value */
6580 /* input: L3 device index for lookup
6581 * output: device index from FIB lookup
6586 /* inputs to lookup */
6587 __u8 tos; /* AF_INET */
6588 __be32 flowinfo; /* AF_INET6, flow_label + priority */
6590 /* output: metric of fib result (IPv4/IPv6 only) */
6596 __u32 ipv6_src[4]; /* in6_addr; network order */
6599 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
6600 * network header. output: bpf_fib_lookup sets to gateway address
6601 * if FIB lookup returns gateway route
6605 __u32 ipv6_dst[4]; /* in6_addr; network order */
6609 __be16 h_vlan_proto;
6611 __u8 smac[6]; /* ETH_ALEN */
6612 __u8 dmac[6]; /* ETH_ALEN */
6615 struct bpf_redir_neigh {
6616 /* network family for lookup (AF_INET, AF_INET6) */
6618 /* network address of nexthop; skips fib lookup to find gateway */
6621 __u32 ipv6_nh[4]; /* in6_addr; network order */
6625 /* bpf_check_mtu flags*/
6626 enum bpf_check_mtu_flags {
6627 BPF_MTU_CHK_SEGS = (1U << 0),
6630 enum bpf_check_mtu_ret {
6631 BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */
6632 BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */
6633 BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */
6636 enum bpf_task_fd_type {
6637 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
6638 BPF_FD_TYPE_TRACEPOINT, /* tp name */
6639 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */
6640 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */
6641 BPF_FD_TYPE_UPROBE, /* filename + offset */
6642 BPF_FD_TYPE_URETPROBE, /* filename + offset */
6646 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
6647 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
6648 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
6651 struct bpf_flow_keys {
6654 __u16 addr_proto; /* ETH_P_* of valid addrs */
6668 __u32 ipv6_src[4]; /* in6_addr; network order */
6669 __u32 ipv6_dst[4]; /* in6_addr; network order */
6676 struct bpf_func_info {
6681 #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
6682 #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
6684 struct bpf_line_info {
6686 __u32 file_name_off;
6691 struct bpf_spin_lock {
6698 } __attribute__((aligned(8)));
6703 } __attribute__((aligned(8)));
6706 __u32 write; /* Sysctl is being read (= 0) or written (= 1).
6707 * Allows 1,2,4-byte read, but no write.
6709 __u32 file_pos; /* Sysctl file position to read from, write to.
6710 * Allows 1,2,4-byte read an 4-byte write.
6714 struct bpf_sockopt {
6715 __bpf_md_ptr(struct bpf_sock *, sk);
6716 __bpf_md_ptr(void *, optval);
6717 __bpf_md_ptr(void *, optval_end);
6725 struct bpf_pidns_info {
6730 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
6731 struct bpf_sk_lookup {
6733 __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
6734 __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
6737 __u32 family; /* Protocol family (AF_INET, AF_INET6) */
6738 __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
6739 __u32 remote_ip4; /* Network byte order */
6740 __u32 remote_ip6[4]; /* Network byte order */
6741 __be16 remote_port; /* Network byte order */
6742 __u16 :16; /* Zero padding */
6743 __u32 local_ip4; /* Network byte order */
6744 __u32 local_ip6[4]; /* Network byte order */
6745 __u32 local_port; /* Host byte order */
6746 __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
6750 * struct btf_ptr is used for typed pointer representation; the
6751 * type id is used to render the pointer data as the appropriate type
6752 * via the bpf_snprintf_btf() helper described above. A flags field -
6753 * potentially to specify additional details about the BTF pointer
6754 * (rather than its mode of display) - is included for future use.
6755 * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
6760 __u32 flags; /* BTF ptr flags; unused at present. */
6764 * Flags to control bpf_snprintf_btf() behaviour.
6765 * - BTF_F_COMPACT: no formatting around type information
6766 * - BTF_F_NONAME: no struct/union member names/types
6767 * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
6768 * equivalent to %px.
6769 * - BTF_F_ZERO: show zero-valued struct/union members; they
6770 * are not displayed by default
6773 BTF_F_COMPACT = (1ULL << 0),
6774 BTF_F_NONAME = (1ULL << 1),
6775 BTF_F_PTR_RAW = (1ULL << 2),
6776 BTF_F_ZERO = (1ULL << 3),
6779 /* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
6780 * has to be adjusted by relocations. It is emitted by llvm and passed to
6781 * libbpf and later to the kernel.
6783 enum bpf_core_relo_kind {
6784 BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */
6785 BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */
6786 BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */
6787 BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
6788 BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
6789 BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
6790 BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
6791 BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */
6792 BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */
6793 BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
6794 BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
6795 BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
6796 BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
6800 * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
6801 * and from libbpf to the kernel.
6803 * CO-RE relocation captures the following data:
6804 * - insn_off - instruction offset (in bytes) within a BPF program that needs
6805 * its insn->imm field to be relocated with actual field info;
6806 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
6808 * - access_str_off - offset into corresponding .BTF string section. String
6809 * interpretation depends on specific relocation kind:
6810 * - for field-based relocations, string encodes an accessed field using
6811 * a sequence of field and array indices, separated by colon (:). It's
6812 * conceptually very close to LLVM's getelementptr ([0]) instruction's
6813 * arguments for identifying offset to a field.
6814 * - for type-based relocations, strings is expected to be just "0";
6815 * - for enum value-based relocations, string contains an index of enum
6816 * value within its enum type;
6817 * - kind - one of enum bpf_core_relo_kind;
6827 * struct sample *s = ...;
6828 * int *x = &s->a; // encoded as "0:0" (a is field #0)
6829 * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
6830 * // b is field #0 inside anon struct, accessing elem #5)
6831 * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
6833 * type_id for all relocs in this example will capture BTF type id of
6836 * Such relocation is emitted when using __builtin_preserve_access_index()
6837 * Clang built-in, passing expression that captures field address, e.g.:
6839 * bpf_probe_read(&dst, sizeof(dst),
6840 * __builtin_preserve_access_index(&src->a.b.c));
6842 * In this case Clang will emit field relocation recording necessary data to
6843 * be able to find offset of embedded `a.b.c` field within `src` struct.
6845 * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
6847 struct bpf_core_relo {
6850 __u32 access_str_off;
6851 enum bpf_core_relo_kind kind;
6854 #endif /* _UAPI__LINUX_BPF_H__ */