/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
        struct flow_rule *rule;
        int i;

        rule = kzalloc(struct_size(rule, action.entries, num_actions),
                       GFP_KERNEL);
        if (!rule)
                return NULL;

        rule->action.num_entries = num_actions;
        /* Pre-fill each action hw_stats with DONT_CARE.
         * Caller can override this if it wants stats for a given action.
         */
        for (i = 0; i < num_actions; i++)
                rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

        return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
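
/* Usage sketch (illustrative only, not part of this file): a caller sizes
 * the rule by its action count and may then override the pre-filled
 * hw_stats; the action values below are arbitrary examples.
 *
 *	struct flow_rule *rule = flow_rule_alloc(1);
 *
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_DROP;
 *	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
 */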

struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
        struct flow_offload_action *fl_action;
        int i;

        fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
                            GFP_KERNEL);
        if (!fl_action)
                return NULL;

        fl_action->action.num_entries = num_actions;
        /* Pre-fill each action hw_stats with DONT_CARE.
         * Caller can override this if it wants stats for a given action.
         */
        for (i = 0; i < num_actions; i++)
                fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

        return fl_action;
}

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)                            \
        const struct flow_match *__m = &(__rule)->match;                       \
        struct flow_dissector *__d = (__m)->dissector;                         \
                                                                               \
        (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);     \
        (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)

void flow_rule_match_meta(const struct flow_rule *rule,
                          struct flow_match_meta *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
                           struct flow_match_basic *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
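
/* Usage sketch (illustrative): drivers walking an offloaded classifier
 * rule typically test flow_rule_match_key() before calling one of these
 * helpers, then compare key against mask.  example_parse() and its
 * policy are hypothetical.
 *
 *	static int example_parse(struct flow_cls_offload *f)
 *	{
 *		struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 *		struct flow_match_basic match;
 *
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *			flow_rule_match_basic(rule, &match);
 *			if (match.mask->n_proto &&
 *			    match.key->n_proto != htons(ETH_P_IP))
 *				return -EOPNOTSUPP;
 *		}
 *		return 0;
 *	}
 */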

void flow_rule_match_control(const struct flow_rule *rule,
                             struct flow_match_control *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
                               struct flow_match_eth_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
                          struct flow_match_vlan *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
                           struct flow_match_vlan *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv4_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv6_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
                        struct flow_match_ip *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
                           struct flow_match_ports *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
                         struct flow_match_tcp *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
                          struct flow_match_icmp *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
                          struct flow_match_mpls *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
                                 struct flow_match_control *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv4_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv6_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
                            struct flow_match_ip *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
                               struct flow_match_ports *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
                               struct flow_match_enc_keyid *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
                              struct flow_match_enc_opts *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
                                                     unsigned int len,
                                                     gfp_t gfp)
{
        struct flow_action_cookie *cookie;

        cookie = kmalloc(sizeof(*cookie) + len, gfp);
        if (!cookie)
                return NULL;
        cookie->cookie_len = len;
        memcpy(cookie->cookie, data, len);
        return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
        kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
                        struct flow_match_ct *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv))
{
        struct flow_block_cb *block_cb;

        block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
        if (!block_cb)
                return ERR_PTR(-ENOMEM);

        block_cb->cb = cb;
        block_cb->cb_ident = cb_ident;
        block_cb->cb_priv = cb_priv;
        block_cb->release = release;

        return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
        if (block_cb->release)
                block_cb->release(block_cb->cb_priv);

        kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
                                           flow_setup_cb_t *cb, void *cb_ident)
{
        struct flow_block_cb *block_cb;

        list_for_each_entry(block_cb, &block->cb_list, list) {
                if (block_cb->cb == cb &&
                    block_cb->cb_ident == cb_ident)
                        return block_cb;
        }

        return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
        return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
        block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
        return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
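
/* Usage sketch (illustrative): refcnt is a plain integer, so callers are
 * expected to serialize incref/decref themselves (block setup normally
 * runs under RTNL).  A driver sharing one block_cb across several binds
 * might do:
 *
 *	block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
 *	if (block_cb) {
 *		flow_block_cb_incref(block_cb);
 *		return 0;
 *	}
 *	...
 *	if (!flow_block_cb_decref(block_cb))
 *		flow_block_cb_free(block_cb);
 */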

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list)
{
        struct flow_block_cb *block_cb;

        list_for_each_entry(block_cb, driver_block_list, driver_list) {
                if (block_cb->cb == cb &&
                    block_cb->cb_ident == cb_ident)
                        return true;
        }

        return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
                               struct list_head *driver_block_list,
                               flow_setup_cb_t *cb,
                               void *cb_ident, void *cb_priv,
                               bool ingress_only)
{
        struct flow_block_cb *block_cb;

        if (ingress_only &&
            f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        f->driver_block_list = driver_block_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
                        return -EBUSY;

                block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, driver_block_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
                if (!block_cb)
                        return -ENOENT;

                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
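
/* Usage sketch (illustrative): a driver with one static block list can
 * forward TC block requests straight to flow_block_cb_setup_simple()
 * from its ndo_setup_tc().  example_block_list and example_setup_cb are
 * hypothetical names.
 *
 *	static LIST_HEAD(example_block_list);
 *
 *	static int example_setup_tc(struct net_device *dev,
 *				    enum tc_setup_type type, void *type_data)
 *	{
 *		if (type != TC_SETUP_BLOCK)
 *			return -EOPNOTSUPP;
 *
 *		return flow_block_cb_setup_simple(type_data,
 *						  &example_block_list,
 *						  example_setup_cb,
 *						  dev, dev, true);
 *	}
 */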

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
        struct list_head                list;
        flow_indr_block_bind_cb_t       *cb;
        void                            *cb_priv;
        refcount_t                      refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
                                                 void *cb_priv)
{
        struct flow_indr_dev *indr_dev;

        indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
        if (!indr_dev)
                return NULL;

        indr_dev->cb = cb;
        indr_dev->cb_priv = cb_priv;
        refcount_set(&indr_dev->refcnt, 1);

        return indr_dev;
}

struct flow_indir_dev_info {
        void *data;
        struct net_device *dev;
        struct Qdisc *sch;
        enum tc_setup_type type;
        void (*cleanup)(struct flow_block_cb *block_cb);
        struct list_head list;
        enum flow_block_command command;
        enum flow_block_binder_type binder_type;
        struct list_head *cb_list;
};

static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
        struct flow_block_offload bo;
        struct flow_indir_dev_info *cur;

        list_for_each_entry(cur, &flow_indir_dev_list, list) {
                memset(&bo, 0, sizeof(bo));
                bo.command = cur->command;
                bo.binder_type = cur->binder_type;
                INIT_LIST_HEAD(&bo.cb_list);
                cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
                list_splice(&bo.cb_list, cur->cb_list);
        }
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
        struct flow_indr_dev *indr_dev;

        mutex_lock(&flow_indr_block_lock);
        list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
                if (indr_dev->cb == cb &&
                    indr_dev->cb_priv == cb_priv) {
                        refcount_inc(&indr_dev->refcnt);
                        mutex_unlock(&flow_indr_block_lock);
                        return 0;
                }
        }

        indr_dev = flow_indr_dev_alloc(cb, cb_priv);
        if (!indr_dev) {
                mutex_unlock(&flow_indr_block_lock);
                return -ENOMEM;
        }

        list_add(&indr_dev->list, &flow_block_indr_dev_list);
        existing_qdiscs_register(cb, cb_priv);
        mutex_unlock(&flow_indr_block_lock);

        tcf_action_reoffload_cb(cb, cb_priv, true);

        return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
                                      void *cb_priv,
                                      struct list_head *cleanup_list)
{
        struct flow_block_cb *this, *next;

        list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
                if (this->release == release &&
                    this->indr.cb_priv == cb_priv)
                        list_move(&this->indr.list, cleanup_list);
        }
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
        struct flow_block_cb *this, *next;

        list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
                list_del(&this->indr.list);
                this->indr.cleanup(this);
        }
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
                              void (*release)(void *cb_priv))
{
        struct flow_indr_dev *this, *next, *indr_dev = NULL;
        LIST_HEAD(cleanup_list);

        mutex_lock(&flow_indr_block_lock);
        list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
                if (this->cb == cb &&
                    this->cb_priv == cb_priv &&
                    refcount_dec_and_test(&this->refcnt)) {
                        indr_dev = this;
                        list_del(&indr_dev->list);
                        break;
                }
        }

        if (!indr_dev) {
                mutex_unlock(&flow_indr_block_lock);
                return;
        }

        __flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
        mutex_unlock(&flow_indr_block_lock);

        tcf_action_reoffload_cb(cb, cb_priv, false);
        flow_block_indr_notify(&cleanup_list);
        kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
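
/* Usage sketch (illustrative): a driver that wants to offload rules
 * installed on foreign devices (e.g. tunnel netdevs it does not own)
 * registers one indirect callback at probe time and drops it on removal.
 * example_indr_cb (a flow_indr_block_bind_cb_t) and example_release are
 * hypothetical names.
 *
 *	err = flow_indr_dev_register(example_indr_cb, priv);
 *	if (err)
 *		goto err_out;
 *	...
 *	flow_indr_dev_unregister(example_indr_cb, priv, example_release);
 */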

static void flow_block_indr_init(struct flow_block_cb *flow_block,
                                 struct flow_block_offload *bo,
                                 struct net_device *dev, struct Qdisc *sch, void *data,
                                 void *cb_priv,
                                 void (*cleanup)(struct flow_block_cb *block_cb))
{
        flow_block->indr.binder_type = bo->binder_type;
        flow_block->indr.data = data;
        flow_block->indr.cb_priv = cb_priv;
        flow_block->indr.dev = dev;
        flow_block->indr.sch = sch;
        flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
                                               void *cb_ident, void *cb_priv,
                                               void (*release)(void *cb_priv),
                                               struct flow_block_offload *bo,
                                               struct net_device *dev,
                                               struct Qdisc *sch, void *data,
                                               void *indr_cb_priv,
                                               void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct flow_block_cb *block_cb;

        block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
        if (IS_ERR(block_cb))
                goto out;

        flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
        list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
        return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
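
/* Usage sketch (illustrative): inside its flow_indr_block_bind_cb_t
 * handler, a driver reacting to FLOW_BLOCK_BIND allocates the block_cb
 * via flow_indr_block_cb_alloc() so the core can track it on
 * flow_block_indr_list and clean it up if the driver goes away.
 * example_setup_cb, example_release and example_block_cb_list are
 * hypothetical names.
 *
 *	block_cb = flow_indr_block_cb_alloc(example_setup_cb, dev, priv,
 *					    example_release, bo, dev, sch,
 *					    data, priv, cleanup);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *	flow_block_cb_add(block_cb, bo);
 *	list_add_tail(&block_cb->driver_list, &example_block_cb_list);
 */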

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
        struct flow_indir_dev_info *cur;

        list_for_each_entry(cur, &flow_indir_dev_list, list) {
                if (cur->data == data)
                        return cur;
        }
        return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
                         enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
                         struct flow_block_offload *bo)
{
        struct flow_indir_dev_info *info;

        info = find_indir_dev(data);
        if (info)
                return -EEXIST;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->data = data;
        info->dev = dev;
        info->sch = sch;
        info->type = type;
        info->cleanup = cleanup;
        info->command = bo->command;
        info->binder_type = bo->binder_type;
        info->cb_list = bo->cb_list_head;

        list_add(&info->list, &flow_indir_dev_list);
        return 0;
}

static int indir_dev_remove(void *data)
{
        struct flow_indir_dev_info *info;

        info = find_indir_dev(data);
        if (!info)
                return -ENOENT;

        list_del(&info->list);

        kfree(info);
        return 0;
}

int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
                                void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct flow_indr_dev *this;
        u32 count = 0;
        int err;

        mutex_lock(&flow_indr_block_lock);
        if (bo) {
                if (bo->command == FLOW_BLOCK_BIND)
                        indir_dev_add(data, dev, sch, type, cleanup, bo);
                else if (bo->command == FLOW_BLOCK_UNBIND)
                        indir_dev_remove(data);
        }

        list_for_each_entry(this, &flow_block_indr_dev_list, list) {
                err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
                if (!err)
                        count++;
        }

        mutex_unlock(&flow_indr_block_lock);

        return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

bool flow_indr_dev_exists(void)
{
        return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);