u64 seq_num;
};
-int bpf_iter_reg_target(struct bpf_iter_reg *reg_info);
+int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const char *target);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
struct bpf_iter_target_info {
struct list_head list;
- const char *target;
- const struct seq_operations *seq_ops;
- bpf_iter_init_seq_priv_t init_seq_private;
- bpf_iter_fini_seq_priv_t fini_seq_private;
- u32 seq_priv_size;
+ const struct bpf_iter_reg *reg_info;
u32 btf_id; /* cached value */
};
iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
target_private);
- if (iter_priv->tinfo->fini_seq_private)
- iter_priv->tinfo->fini_seq_private(seq->private);
+ if (iter_priv->tinfo->reg_info->fini_seq_private)
+ iter_priv->tinfo->reg_info->fini_seq_private(seq->private);
bpf_prog_put(iter_priv->prog);
seq->private = iter_priv;
.release = iter_release,
};
-int bpf_iter_reg_target(struct bpf_iter_reg *reg_info)
+/* The argument reg_info will be cached in bpf_iter_target_info.
+ * The common practice is to declare target reg_info as
+ * a const static variable and pass it as an argument to
+ * bpf_iter_reg_target().
+ */
+int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
{
struct bpf_iter_target_info *tinfo;
if (!tinfo)
return -ENOMEM;
- tinfo->target = reg_info->target;
- tinfo->seq_ops = reg_info->seq_ops;
- tinfo->init_seq_private = reg_info->init_seq_private;
- tinfo->fini_seq_private = reg_info->fini_seq_private;
- tinfo->seq_priv_size = reg_info->seq_priv_size;
+ tinfo->reg_info = reg_info;
INIT_LIST_HEAD(&tinfo->list);
mutex_lock(&targets_mutex);
mutex_lock(&targets_mutex);
list_for_each_entry(tinfo, &targets, list) {
- if (!strcmp(target, tinfo->target)) {
+ if (!strcmp(target, tinfo->reg_info->target)) {
list_del(&tinfo->list);
kfree(tinfo);
found = true;
supported = true;
break;
}
- if (!strcmp(attach_fname + prefix_len, tinfo->target)) {
+ if (!strcmp(attach_fname + prefix_len, tinfo->reg_info->target)) {
cache_btf_id(tinfo, prog);
supported = true;
break;
tinfo = link->tinfo;
total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) +
- tinfo->seq_priv_size;
- priv_data = __seq_open_private(file, tinfo->seq_ops, total_priv_dsize);
+ tinfo->reg_info->seq_priv_size;
+ priv_data = __seq_open_private(file, tinfo->reg_info->seq_ops,
+ total_priv_dsize);
if (!priv_data) {
err = -ENOMEM;
goto release_prog;
}
- if (tinfo->init_seq_private) {
- err = tinfo->init_seq_private(priv_data->target_private);
+ if (tinfo->reg_info->init_seq_private) {
+ err = tinfo->reg_info->init_seq_private(priv_data->target_private);
if (err)
goto release_seq_file;
}
.show = bpf_map_seq_show,
};
+static const struct bpf_iter_reg bpf_map_reg_info = {
+ .target = "bpf_map",
+ .seq_ops = &bpf_map_seq_ops,
+ .init_seq_private = NULL,
+ .fini_seq_private = NULL,
+ .seq_priv_size = sizeof(struct bpf_iter_seq_map_info),
+};
+
static int __init bpf_map_iter_init(void)
{
- struct bpf_iter_reg reg_info = {
- .target = "bpf_map",
- .seq_ops = &bpf_map_seq_ops,
- .init_seq_private = NULL,
- .fini_seq_private = NULL,
- .seq_priv_size = sizeof(struct bpf_iter_seq_map_info),
- };
-
- return bpf_iter_reg_target(&reg_info);
+ return bpf_iter_reg_target(&bpf_map_reg_info);
}
late_initcall(bpf_map_iter_init);
.show = task_file_seq_show,
};
+static const struct bpf_iter_reg task_reg_info = {
+ .target = "task",
+ .seq_ops = &task_seq_ops,
+ .init_seq_private = init_seq_pidns,
+ .fini_seq_private = fini_seq_pidns,
+ .seq_priv_size = sizeof(struct bpf_iter_seq_task_info),
+};
+
+static const struct bpf_iter_reg task_file_reg_info = {
+ .target = "task_file",
+ .seq_ops = &task_file_seq_ops,
+ .init_seq_private = init_seq_pidns,
+ .fini_seq_private = fini_seq_pidns,
+ .seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info),
+};
+
static int __init task_iter_init(void)
{
- struct bpf_iter_reg task_file_reg_info = {
- .target = "task_file",
- .seq_ops = &task_file_seq_ops,
- .init_seq_private = init_seq_pidns,
- .fini_seq_private = fini_seq_pidns,
- .seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info),
- };
- struct bpf_iter_reg task_reg_info = {
- .target = "task",
- .seq_ops = &task_seq_ops,
- .init_seq_private = init_seq_pidns,
- .fini_seq_private = fini_seq_pidns,
- .seq_priv_size = sizeof(struct bpf_iter_seq_task_info),
- };
int ret;
ret = bpf_iter_reg_target(&task_reg_info);
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
+static const struct bpf_iter_reg ipv6_route_reg_info = {
+ .target = "ipv6_route",
+ .seq_ops = &ipv6_route_seq_ops,
+ .init_seq_private = bpf_iter_init_seq_net,
+ .fini_seq_private = bpf_iter_fini_seq_net,
+ .seq_priv_size = sizeof(struct ipv6_route_iter),
+};
+
static int __init bpf_iter_register(void)
{
- struct bpf_iter_reg reg_info = {
- .target = "ipv6_route",
- .seq_ops = &ipv6_route_seq_ops,
- .init_seq_private = bpf_iter_init_seq_net,
- .fini_seq_private = bpf_iter_fini_seq_net,
- .seq_priv_size = sizeof(struct ipv6_route_iter),
- };
-
- return bpf_iter_reg_target(&reg_info);
+ return bpf_iter_reg_target(&ipv6_route_reg_info);
}
static void bpf_iter_unregister(void)
};
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+static const struct bpf_iter_reg netlink_reg_info = {
+ .target = "netlink",
+ .seq_ops = &netlink_seq_ops,
+ .init_seq_private = bpf_iter_init_seq_net,
+ .fini_seq_private = bpf_iter_fini_seq_net,
+ .seq_priv_size = sizeof(struct nl_seq_iter),
+};
+
static int __init bpf_iter_register(void)
{
- struct bpf_iter_reg reg_info = {
- .target = "netlink",
- .seq_ops = &netlink_seq_ops,
- .init_seq_private = bpf_iter_init_seq_net,
- .fini_seq_private = bpf_iter_fini_seq_net,
- .seq_priv_size = sizeof(struct nl_seq_iter),
- };
-
- return bpf_iter_reg_target(&reg_info);
+ return bpf_iter_reg_target(&netlink_reg_info);
}
#endif