/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

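/*
 * Example, for illustration only: a classic BPF program suitable for this
 * classifier. The filter's return value is interpreted by cls_bpf_classify()
 * below: 0 means "no match", -1 selects the default classid configured via
 * TCA_BPF_CLASSID, any other value is used as the classid directly. The
 * hypothetical program below would select ARP frames:
 *
 *	struct sock_filter match_arp[] = {
 *		{ 0x28, 0, 0, 12 },		ldh [12]	load EtherType
 *		{ 0x15, 0, 1, 0x0806 },		jeq #ETH_P_ARP	is it ARP?
 *		{ 0x06, 0, 0, 0xffffffff },	ret #-1		match, default classid
 *		{ 0x06, 0, 0, 0 },		ret #0		no match
 *	};
 */
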
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

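/*
 * Per-tcf_proto state: a list of installed BPF programs plus a generator
 * for automatically assigned handles. Each cls_bpf_prog carries the raw
 * filter ops as received via netlink, the prepared sk_filter, the bound
 * classification result and any attached extended actions.
 */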
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
};

struct cls_bpf_prog {
	struct sk_filter *filter;
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct tcf_result res;
	struct list_head link;
	u32 handle;
	u16 bpf_len;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static const struct tcf_ext_map bpf_ext_map = {
	.action = TCA_BPF_ACT,
	.police = TCA_BPF_POLICE,
};

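/*
 * Fast path: run every attached BPF program against the skb in list order.
 * A return value of 0 means no match, -1 keeps the classid configured via
 * TCA_BPF_CLASSID, any other value is used as the classid directly. The
 * extended actions of the matching program are executed before returning.
 */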
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry(prog, &head->plist, link) {
		int filter_res = SK_RUN_FILTER(prog->filter, skb);

		if (filter_res == 0)
			continue;
		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;
		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;
		return ret;
	}

	return -1;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD(&head->plist);
	tp->root = head;
	return 0;
}

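/* Release everything a single program holds: the classid binding, the
 * attached actions, the prepared filter and the raw ops copied from
 * user space.
 */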
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_unbind_filter(tp, &prog->res);
	tcf_exts_destroy(tp, &prog->exts);

	sk_unattached_filter_destroy(prog->filter);

	kfree(prog->bpf_ops);
	kfree(prog);
}

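/* Unlink and free exactly the program referenced by @arg, if it is still
 * on the list.
 */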
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog == todel) {
			tcf_tree_lock(tp);
			list_del(&prog->link);
			tcf_tree_unlock(tp);
			cls_bpf_delete_prog(tp, prog);
			return 0;
		}
	}

	return -ENOENT;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del(&prog->link);
		cls_bpf_delete_prog(tp, prog);
	}

	kfree(head);
}

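/* Look up a program by its u32 handle; returns it as an opaque cookie for
 * the core, or 0 if no such handle exists.
 */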
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
{
}

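/*
 * Parse and validate the BPF related netlink attributes, build a new
 * unattached socket filter from the ops array and swap it into @prog under
 * the tree lock, then dispose of the previous filter and ops.
 */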
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est)
{
	struct sock_filter *bpf_ops, *bpf_old;
	struct tcf_exts exts;
	struct sock_fprog tmp;
	struct sk_filter *fp, *fp_old;
	u16 bpf_size, bpf_len;
	u32 classid;
	int ret;

	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &exts, &bpf_ext_map);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
	bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_size = bpf_len * sizeof(*bpf_ops);
	/* the TCA_BPF_OPS attribute must carry exactly bpf_len instructions */
	if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL) {
		ret = -ENOMEM;
		goto errout;
	}

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	tmp.len = bpf_len;
	tmp.filter = (struct sock_filter __user *) bpf_ops;

	ret = sk_unattached_filter_create(&fp, &tmp);
	if (ret)
		goto errout_free;

	tcf_tree_lock(tp);
	fp_old = prog->filter;
	bpf_old = prog->bpf_ops;
	prog->bpf_len = bpf_len;
	prog->bpf_ops = bpf_ops;
	prog->filter = fp;
	prog->res.classid = classid;
	tcf_tree_unlock(tp);

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	if (fp_old)
		sk_unattached_filter_destroy(fp_old);
	if (bpf_old)
		kfree(bpf_old);
	return 0;

errout_free:
	kfree(bpf_ops);
errout:
	tcf_exts_destroy(tp, &exts);
	return ret;
}

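/* Generate an unused handle when the user did not specify one. Returns 0
 * if the handle space is exhausted.
 */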
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle = 0;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (i == 0)
		pr_err("Insufficient number of handles\n");
	else
		handle = head->hgen;

	return handle;
}

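/*
 * Create a new program or update an existing one from a netlink change
 * request: parse TCA_OPTIONS, pick or validate the handle and link the
 * program into the list on success.
 */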
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	if (prog != NULL) {
		if (handle && prog->handle != handle)
			return -EINVAL;
		return cls_bpf_modify_existing(net, tp, prog, base, tb,
					       tca[TCA_RATE]);
	}

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (prog == NULL)
		return -ENOBUFS;

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]);
	if (ret < 0)
		goto errout;

	tcf_tree_lock(tp);
	list_add(&prog->link, &head->plist);
	tcf_tree_unlock(tp);

	*arg = (unsigned long) prog;
	return 0;

errout:
	if (*arg == 0UL && prog)
		kfree(prog);

	return ret;
}

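/* Dump one program back to user space: classid, ops length, the raw ops
 * array and any attached extended actions.
 */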
static int cls_bpf_dump(struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest, *nla;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
		goto nla_put_failure;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		goto nla_put_failure;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	if (tcf_exts_dump(skb, &prog->exts, &bpf_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts, &bpf_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

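/* Iterate over all installed programs on behalf of the core, honouring the
 * walker's skip/count bookkeeping.
 */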
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.put		=	cls_bpf_put,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);