// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"

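/* Flow key material and rewrite state collected while parsing a flower rule */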
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;
	};

	__be16 src_port;
	__be16 dst_port;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

struct mtk_flow_entry {
	struct rhash_head node;
	unsigned long cookie;
	u16 hash;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

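/*
 * Hardware timestamp in FOE timestamp units; used to stamp newly committed
 * entries and to compute idle time in the stats callback below.
 */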
static u32
mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}

static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
		       bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

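/*
 * Apply an ethernet-header pedit action directly to the ethhdr template.
 * Offsets larger than 8 are ignored; a mask of 0xffff skips the first two
 * bytes of the 32-bit value, and any non-zero mask limits the copy to two
 * bytes instead of four.
 */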
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

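/* Fold a TCP/UDP port pedit action into the tracked source/destination port */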
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

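/* Fold an IPv4 address pedit action into the tracked source/destination address */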
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

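/*
 * If the egress device is a DSA user port using MediaTek tags, switch the
 * target to its CPU port master and return the switch port index so it can
 * be encoded in the FOE entry; -ENODEV otherwise.
 */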
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dp->cpu_dp->master;

	return dp->index;
#else
	return -ENODEV;
#endif
}

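/*
 * Map the egress netdev to a PSE port (one of the two MAC netdevs) and
 * record the DSA port in the FOE entry when the destination sits behind
 * the switch.
 */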
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev)
{
	int pse_port, dsa_port;

	dsa_port = mtk_flow_get_dsa_port(&dev);
	if (dsa_port >= 0)
		mtk_foe_entry_set_dsa(foe, dsa_port);

	if (dev == eth->netdev[0])
		pse_port = 1;
	else if (dev == eth->netdev[1])
		pse_port = 2;
	else
		return -EOPNOTSUPP;

	mtk_foe_entry_set_pse_port(foe, pse_port);

	return 0;
}

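/*
 * FLOW_CLS_REPLACE handler: validate the flower match and actions, build a
 * FOE entry for the PPE, commit it to hardware and track it in the cookie
 * keyed flow_table rhashtable.
 */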
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	u16 addr_type = 0;
	u32 timestamp;
	u8 l4proto = 0;
	int err = 0;
	int hash;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

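	/* first pass over the actions: collect rewrites and the output device */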
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	switch (addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
				    data.eth.h_source,
				    data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(&foe, &data, false);
	}

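	/* second pass: apply TCP/UDP port and IPv4 address mangling to the tuple */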
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
		if (err)
			return err;
	}

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev);
	if (err)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	timestamp = mtk_eth_timestamp(eth);
	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
	if (hash < 0) {
		err = hash;
		goto free;
	}

	entry->hash = hash;
	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear_flow;

	return 0;
clear_flow:
	mtk_foe_entry_clear(&eth->ppe, hash);
free:
	kfree(entry);
	return err;
}

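/* FLOW_CLS_DESTROY handler: drop the hardware FOE entry and the tracking node */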
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(&eth->ppe, entry->hash);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	kfree(entry);

	return 0;
}

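/*
 * FLOW_CLS_STATS handler: estimate lastused from the gap between the current
 * hardware timestamp and the timestamp stored in the FOE entry.
 */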
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	int timestamp;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
	if (timestamp < 0)
		return -ETIMEDOUT;

	idle = mtk_eth_timestamp(eth) - timestamp;
	f->stats.lastused = jiffies - idle * HZ;

	return 0;
}

static DEFINE_MUTEX(mtk_flow_offload_mutex);

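/*
 * Flower block callback: dispatch replace/destroy/stats requests, serialized
 * by the global mtk_flow_offload_mutex.
 */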
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

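/* Bind or unbind the flower block callback for a port, with refcounted reuse */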
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->ppe.foe_table)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

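/* Entry point wired up as ndo_setup_tc; only TC_SETUP_FT (flow table offload) is handled */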
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	if (type == TC_SETUP_FT)
		return mtk_eth_setup_tc_block(dev, type_data);

	return -EOPNOTSUPP;
}

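/* Set up the cookie-keyed flow table; requires the PPE FOE table to be present */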
int mtk_eth_offload_init(struct mtk_eth *eth)
{
	if (!eth->ppe.foe_table)
		return -EOPNOTSUPP;

	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}