// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"

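/* Flow description accumulated while parsing a flower rule: Ethernet
 * header, IPv4 address tuple, L4 ports and any VLAN/PPPoE encapsulation
 * to apply on egress.
 */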
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;
	};

	__be16 src_port;
	__be16 dst_port;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

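/* Ties a flower cookie to the hash of its committed FOE entry. */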
struct mtk_flow_entry {
	struct rhash_head node;
	unsigned long cookie;
	u16 hash;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

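/* Current hardware timestamp, masked to the width of the FOE bind
 * timestamp field.
 */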
static u32
mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}

static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
		       bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

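/* Apply an Ethernet header mangle to the cached header copy. Offsets
 * 0-8 cover the destination and source MAC; the mask selects whether
 * two or four bytes of the value are written.
 */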
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

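/* Apply a TCP/UDP port mangle. A 4-byte write at offset 0 carries the
 * source port in the upper half and the destination port in the lower
 * half; the mask selects which half is rewritten. Offset 2 rewrites
 * the destination port only.
 */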
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

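/* Apply an IPv4 header mangle to the source or destination address. */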
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

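/* If @dev is a DSA user port using the MediaTek tag protocol, rewrite
 * it to the CPU port's master device and return the switch port index.
 */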
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dp->cpu_dp->master;

	return dp->index;
#else
	return -ENODEV;
#endif
}

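/* Resolve the egress netdev to one of the two GMAC PSE ports, adding
 * DSA tagging info first when the target sits behind the switch.
 */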
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev)
{
	int pse_port, dsa_port;

	dsa_port = mtk_flow_get_dsa_port(&dev);
	if (dsa_port >= 0)
		mtk_foe_entry_set_dsa(foe, dsa_port);

	if (dev == eth->netdev[0])
		pse_port = 1;
	else if (dev == eth->netdev[1])
		pse_port = 2;
	else
		return -EOPNOTSUPP;

	mtk_foe_entry_set_pse_port(foe, pse_port);

	return 0;
}

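/* Translate a flower rule into a FOE entry and commit it to the PPE.
 * Only IPv4 HNAPT flows with known L4 ports are supported.
 */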
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	u16 addr_type = 0;
	u32 timestamp;
	u8 l4proto = 0;
	int err = 0;
	int hash;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

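	/* First pass over the actions: record the redirect target and any
	 * VLAN/PPPoE encapsulation, and apply Ethernet header mangles to
	 * the cached header copy.
	 */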
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	switch (addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
				    data.eth.h_source,
				    data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(&foe, &data, false);
	}

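	/* Second pass: fold IPv4 address and L4 port mangles (i.e. NAT)
	 * into the flow data before writing the egress tuple.
	 */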
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
		if (err)
			return err;
	}

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev);
	if (err)
		return err;

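	/* Commit the entry to the PPE first, then track it by cookie so
	 * later destroy/stats requests can find it.
	 */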
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	timestamp = mtk_eth_timestamp(eth);
	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
	if (hash < 0) {
		err = hash;
		goto free;
	}

	entry->hash = hash;
	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear_flow;

	return 0;
clear_flow:
	mtk_foe_entry_clear(&eth->ppe, hash);
free:
	kfree(entry);
	return err;
}

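/* Tear down an offloaded flow: invalidate its FOE entry and drop the
 * lookup node.
 */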
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(&eth->ppe, entry->hash);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	kfree(entry);

	return 0;
}

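/* Report last-use time, computed from how long the FOE entry has been
 * idle relative to the current hardware timestamp.
 */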
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	int timestamp;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
	if (timestamp < 0)
		return -ETIMEDOUT;

	idle = mtk_eth_timestamp(eth) - timestamp;
	f->stats.lastused = jiffies - idle * HZ;

	return 0;
}

static DEFINE_MUTEX(mtk_flow_offload_mutex);

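/* flow_block callback: dispatches flower replace/destroy/stats commands,
 * serialized by a single mutex shared across all ports.
 */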
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

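/* Bind or unbind the ingress flow block, refcounting the callback so
 * repeated binds on the same device reuse one flow_block_cb.
 */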
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->ppe.foe_table)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

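/* ndo_setup_tc entry point; only flowtable offload (TC_SETUP_FT) is
 * handled here.
 */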
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	if (type == TC_SETUP_FT)
		return mtk_eth_setup_tc_block(dev, type_data);

	return -EOPNOTSUPP;
}

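/* Set up the cookie lookup table; a no-op when the PPE is unavailable. */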
int mtk_eth_offload_init(struct mtk_eth *eth)
{
	if (!eth->ppe.foe_table)
		return 0;

	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}