// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"

struct mtk_flow_data {
	struct ethhdr eth;

	struct {
		__be32 src_addr;
		__be32 dst_addr;
	} v4;

	__be16 src_port;
	__be16 dst_port;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

struct mtk_flow_entry {
	struct rhash_head node;
	unsigned long cookie;
	u16 hash;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};
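
/* Current hardware timestamp, masked to the width of the FOE entry bind
 * timestamp field. Used when committing new entries and when computing
 * how long an offloaded flow has been idle.
 */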
static u32
mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}
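
/* Write the IPv4 address + port tuple into the FOE entry. Called once with
 * the tuple taken from the flow match (ingress) and again after mangle
 * actions have been applied (egress), so NAT translations are programmed
 * into the entry as well.
 */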
static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
		       bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}
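
/* Apply an Ethernet header pedit to the MAC addresses staged in @eth.
 * Mangle values arrive as 32-bit words; the mask decides whether 2 or 4
 * bytes are rewritten, and offsets beyond the two MAC addresses are
 * ignored.
 */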
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
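
/* Extract rewritten L4 ports from a TCP/UDP pedit. Offset 0 covers the
 * source/destination port pair, offset 2 the destination port alone.
 */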
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
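
/* Extract a rewritten IPv4 source or destination address from a pedit */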
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}
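
/* If @dev is a DSA port using the MediaTek tag protocol, rewrite it to the
 * CPU port's master device and return the switch port index; otherwise
 * return -ENODEV.
 */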
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dp->cpu_dp->master;

	return dp->index;
#else
	return -ENODEV;
#endif
}
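
/* Resolve the destination device to a PSE port (GMAC1 or GMAC2), adding
 * DSA tagging information first when the target is a switch port.
 */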
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev)
{
	int pse_port, dsa_port;

	dsa_port = mtk_flow_get_dsa_port(&dev);
	if (dsa_port >= 0)
		mtk_foe_entry_set_dsa(foe, dsa_port);

	if (dev == eth->netdev[0])
		pse_port = 1;
	else if (dev == eth->netdev[1])
		pse_port = 2;
	else
		return -EOPNOTSUPP;

	mtk_foe_entry_set_pse_port(foe, pse_port);

	return 0;
}
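
/* Translate a flower rule into a FOE entry and commit it to the PPE.
 * Only IPv4 HNAPT flows are supported: the rule must provide META,
 * CONTROL, BASIC, PORTS and IPv4 address matches, may mangle MAC
 * addresses, IPv4 addresses and L4 ports (NAT), push a single 802.1Q tag
 * and/or a PPPoE header, and must redirect to one of the GMACs.
 */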
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	u16 addr_type = 0;
	u32 timestamp;
	u8 l4proto = 0;
	int err = 0;
	int hash;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	switch (addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
				    data.eth.h_source, data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);
		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(&foe, &data, false);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
		if (err)
			return err;
	}

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev);
	if (err)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	timestamp = mtk_eth_timestamp(eth);
	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
	if (hash < 0) {
		err = hash;
		goto free;
	}

	entry->hash = hash;
	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear_flow;

	return 0;

clear_flow:
	mtk_foe_entry_clear(&eth->ppe, hash);
free:
	kfree(entry);
	return err;
}
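
/* Tear down an offloaded flow: invalidate the FOE entry and drop the
 * cookie from the flow table.
 */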
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(&eth->ppe, entry->hash);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	kfree(entry);

	return 0;
}
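
/* Report when the flow was last used, derived from the difference between
 * the current hardware timestamp and the timestamp stored in the FOE entry.
 */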
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	int timestamp;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
	if (timestamp < 0)
		return -ETIMEDOUT;

	idle = mtk_eth_timestamp(eth) - timestamp;
	f->stats.lastused = jiffies - idle * HZ;

	return 0;
}

static DEFINE_MUTEX(mtk_flow_offload_mutex);
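
/* Block callback: dispatch flower classifier commands to the
 * replace/destroy/stats handlers above, serialized by a global mutex.
 */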
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}
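
/* Bind or unbind the flow block for a port. The flow_block_cb is
 * refcounted, so repeated binds on the same device reuse the existing
 * callback entry on the static driver list.
 */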
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->ppe.foe_table)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
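
/* Entry point for tc offload requests from the stack; only flowtable
 * offload (TC_SETUP_FT) is handled here.
 */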
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	if (type == TC_SETUP_FT)
		return mtk_eth_setup_tc_block(dev, type_data);

	return -EOPNOTSUPP;
}
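
/* Initialize the cookie -> flow entry hash table; offloading is only
 * available when the PPE FOE table has been set up.
 */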
int mtk_eth_offload_init(struct mtk_eth *eth)
{
	if (!eth->ppe.foe_table)
		return -EOPNOTSUPP;

	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}