drivers/net/ethernet/mediatek/mtk_ppe_offload.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"

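/* Flow key and rewrite data collected from a flower rule before it is
 * translated into a hardware FOE (flow offload engine) entry.
 */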
struct mtk_flow_data {
        struct ethhdr eth;

        union {
                struct {
                        __be32 src_addr;
                        __be32 dst_addr;
                } v4;
        };

        __be16 src_port;
        __be16 dst_port;

        struct {
                u16 id;
                __be16 proto;
                u8 num;
        } vlan;
        struct {
                u16 sid;
                u8 num;
        } pppoe;
};

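/* One offloaded flow: links the flower cookie to the hash index of the
 * committed FOE entry so destroy/stats requests can find it again.
 */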
struct mtk_flow_entry {
        struct rhash_head node;
        unsigned long cookie;
        u16 hash;
};

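/* Offloaded flows, hashed by the flower cookie. */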
static const struct rhashtable_params mtk_flow_ht_params = {
        .head_offset = offsetof(struct mtk_flow_entry, node),
        .key_offset = offsetof(struct mtk_flow_entry, cookie),
        .key_len = sizeof(unsigned long),
        .automatic_shrinking = true,
};

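/* Current frame engine timestamp, masked to the width of the FOE
 * entry's bind timestamp field.
 */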
static u32
mtk_eth_timestamp(struct mtk_eth *eth)
{
        return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}

static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
                       bool egress)
{
        return mtk_foe_entry_set_ipv4_tuple(foe, egress,
                                            data->v4.src_addr, data->src_port,
                                            data->v4.dst_addr, data->dst_port);
}

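/* Apply an ETH header mangle action to the cached Ethernet header.
 * Only the MAC addresses (offset <= 8) can be rewritten; a mangle mask
 * of 0xffff means only half of the 32-bit mangle word is replaced.
 */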
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
        void *dest = eth + act->mangle.offset;
        const void *src = &act->mangle.val;

        if (act->mangle.offset > 8)
                return;

        if (act->mangle.mask == 0xffff) {
                src += 2;
                dest += 2;
        }

        memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

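/* Fold a TCP/UDP port mangle action into the flow data. The mangle
 * value is a 32-bit word covering two adjacent 16-bit ports; the
 * offset and mask select which port is actually being rewritten.
 */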
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
                      struct mtk_flow_data *data)
{
        u32 val = ntohl(act->mangle.val);

        switch (act->mangle.offset) {
        case 0:
                if (act->mangle.mask == ~htonl(0xffff))
                        data->dst_port = cpu_to_be16(val);
                else
                        data->src_port = cpu_to_be16(val >> 16);
                break;
        case 2:
                data->dst_port = cpu_to_be16(val);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

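/* Fold an IPv4 source/destination address mangle action into the flow
 * data.
 */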
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
                     struct mtk_flow_data *data)
{
        __be32 *dest;

        switch (act->mangle.offset) {
        case offsetof(struct iphdr, saddr):
                dest = &data->v4.src_addr;
                break;
        case offsetof(struct iphdr, daddr):
                dest = &data->v4.dst_addr;
                break;
        default:
                return -EINVAL;
        }

        memcpy(dest, &act->mangle.val, sizeof(u32));

        return 0;
}

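/* If *dev is a DSA user port behind a MediaTek switch, redirect *dev to
 * the CPU port's master device and return the switch port index;
 * otherwise return -ENODEV.
 */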
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
        struct dsa_port *dp;

        dp = dsa_port_from_netdev(*dev);
        if (IS_ERR(dp))
                return -ENODEV;

        if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
                return -ENODEV;

        *dev = dp->cpu_dp->master;

        return dp->index;
#else
        return -ENODEV;
#endif
}

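/* Resolve the egress netdev to a PSE port on the frame engine: GMAC0
 * maps to PSE port 1, GMAC1 to port 2. For a DSA switch port, the
 * switch port number is also encoded into the entry so the special tag
 * can be inserted in hardware.
 */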
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
                           struct net_device *dev)
{
        int pse_port, dsa_port;

        dsa_port = mtk_flow_get_dsa_port(&dev);
        if (dsa_port >= 0)
                mtk_foe_entry_set_dsa(foe, dsa_port);

        if (dev == eth->netdev[0])
                pse_port = 1;
        else if (dev == eth->netdev[1])
                pse_port = 2;
        else
                return -EOPNOTSUPP;

        mtk_foe_entry_set_pse_port(foe, pse_port);

        return 0;
}

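/* Translate a FLOW_CLS_REPLACE request into a FOE entry and commit it
 * to the PPE. Only fully specified IPv4 flows with a redirect action
 * are accepted; any unsupported match key or action bails out with
 * -EOPNOTSUPP.
 */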
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_action_entry *act;
        struct mtk_flow_data data = {};
        struct mtk_foe_entry foe;
        struct net_device *odev = NULL;
        struct mtk_flow_entry *entry;
        int offload_type = 0;
        u16 addr_type = 0;
        u32 timestamp;
        u8 l4proto = 0;
        int err = 0;
        int hash;
        int i;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
                struct flow_match_meta match;

                flow_rule_match_meta(rule, &match);
        } else {
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;
        } else {
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                l4proto = match.key->ip_proto;
        } else {
                return -EOPNOTSUPP;
        }

        flow_action_for_each(i, act, &rule->action) {
                switch (act->id) {
                case FLOW_ACTION_MANGLE:
                        if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
                                mtk_flow_offload_mangle_eth(act, &data.eth);
                        break;
                case FLOW_ACTION_REDIRECT:
                        odev = act->dev;
                        break;
                case FLOW_ACTION_CSUM:
                        break;
                case FLOW_ACTION_VLAN_PUSH:
                        if (data.vlan.num == 1 ||
                            act->vlan.proto != htons(ETH_P_8021Q))
                                return -EOPNOTSUPP;

                        data.vlan.id = act->vlan.vid;
                        data.vlan.proto = act->vlan.proto;
                        data.vlan.num++;
                        break;
                case FLOW_ACTION_VLAN_POP:
                        break;
                case FLOW_ACTION_PPPOE_PUSH:
                        if (data.pppoe.num == 1)
                                return -EOPNOTSUPP;

                        data.pppoe.sid = act->pppoe.sid;
                        data.pppoe.num++;
                        break;
                default:
                        return -EOPNOTSUPP;
                }
        }

        switch (addr_type) {
        case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!is_valid_ether_addr(data.eth.h_source) ||
            !is_valid_ether_addr(data.eth.h_dest))
                return -EINVAL;

        err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
                                    data.eth.h_source,
                                    data.eth.h_dest);
        if (err)
                return err;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports ports;

                flow_rule_match_ports(rule, &ports);
                data.src_port = ports.key->src;
                data.dst_port = ports.key->dst;
        } else {
                return -EOPNOTSUPP;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs addrs;

                flow_rule_match_ipv4_addrs(rule, &addrs);

                data.v4.src_addr = addrs.key->src;
                data.v4.dst_addr = addrs.key->dst;

                mtk_flow_set_ipv4_addr(&foe, &data, false);
        }

        flow_action_for_each(i, act, &rule->action) {
                if (act->id != FLOW_ACTION_MANGLE)
                        continue;

                switch (act->mangle.htype) {
                case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                        err = mtk_flow_mangle_ports(act, &data);
                        break;
                case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                        err = mtk_flow_mangle_ipv4(act, &data);
                        break;
                case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                        /* handled earlier */
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                if (err)
                        return err;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                err = mtk_flow_set_ipv4_addr(&foe, &data, true);
                if (err)
                        return err;
        }

        if (data.vlan.num == 1) {
                if (data.vlan.proto != htons(ETH_P_8021Q))
                        return -EOPNOTSUPP;

                mtk_foe_entry_set_vlan(&foe, data.vlan.id);
        }
        if (data.pppoe.num == 1)
                mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

        err = mtk_flow_set_output_device(eth, &foe, odev);
        if (err)
                return err;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->cookie = f->cookie;
        timestamp = mtk_eth_timestamp(eth);
        hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
        if (hash < 0) {
                err = hash;
                goto free;
        }

        entry->hash = hash;
        err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
                                     mtk_flow_ht_params);
        if (err < 0)
                goto clear_flow;

        return 0;
clear_flow:
        mtk_foe_entry_clear(&eth->ppe, hash);
free:
        kfree(entry);
        return err;
}

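/* Tear down an offloaded flow: invalidate its FOE entry and drop the
 * bookkeeping entry from the flow table.
 */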
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
        struct mtk_flow_entry *entry;

        entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
                                  mtk_flow_ht_params);
        if (!entry)
                return -ENOENT;

        mtk_foe_entry_clear(&eth->ppe, entry->hash);
        rhashtable_remove_fast(&eth->flow_table, &entry->node,
                               mtk_flow_ht_params);
        kfree(entry);

        return 0;
}

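/* Report when the hardware last used this flow. The PPE refreshes the
 * entry's bind timestamp on every lookup hit, so the distance to the
 * current timestamp is the idle time, which is treated as seconds when
 * converting to jiffies for stats.lastused.
 */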
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
        struct mtk_flow_entry *entry;
        int timestamp;
        u32 idle;

        entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
                                  mtk_flow_ht_params);
        if (!entry)
                return -ENOENT;

        timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
        if (timestamp < 0)
                return -ETIMEDOUT;

        idle = mtk_eth_timestamp(eth) - timestamp;
        f->stats.lastused = jiffies - idle * HZ;

        return 0;
}

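/* Serializes flower requests, which may arrive concurrently on both MACs. */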
static DEFINE_MUTEX(mtk_flow_offload_mutex);

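/* flow_block callback: dispatch classifier commands to the offload
 * handlers above under the global mutex.
 */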
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
        struct flow_cls_offload *cls = type_data;
        struct net_device *dev = cb_priv;
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        int err;

        if (!tc_can_offload(dev))
                return -EOPNOTSUPP;

        if (type != TC_SETUP_CLSFLOWER)
                return -EOPNOTSUPP;

        mutex_lock(&mtk_flow_offload_mutex);
        switch (cls->command) {
        case FLOW_CLS_REPLACE:
                err = mtk_flow_offload_replace(eth, cls);
                break;
        case FLOW_CLS_DESTROY:
                err = mtk_flow_offload_destroy(eth, cls);
                break;
        case FLOW_CLS_STATS:
                err = mtk_flow_offload_stats(eth, cls);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }
        mutex_unlock(&mtk_flow_offload_mutex);

        return err;
}

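/* Bind or unbind a flower block for this device. Block callbacks are
 * refcounted so that repeated binds through the same device share a
 * single callback entry.
 */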
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        static LIST_HEAD(block_cb_list);
        struct flow_block_cb *block_cb;
        flow_setup_cb_t *cb;

        if (!eth->ppe.foe_table)
                return -EOPNOTSUPP;

        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        cb = mtk_eth_setup_tc_block_cb;
        f->driver_block_list = &block_cb_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                block_cb = flow_block_cb_lookup(f->block, cb, dev);
                if (block_cb) {
                        flow_block_cb_incref(block_cb);
                        return 0;
                }
                block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f->block, cb, dev);
                if (!block_cb)
                        return -ENOENT;

                if (flow_block_cb_decref(block_cb)) {
                        flow_block_cb_remove(block_cb, f);
                        list_del(&block_cb->driver_list);
                }
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

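/* ndo_setup_tc entry point: only the flowtable offload hook
 * (TC_SETUP_FT) is handled here.
 */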
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
                     void *type_data)
{
        if (type == TC_SETUP_FT)
                return mtk_eth_setup_tc_block(dev, type_data);

        return -EOPNOTSUPP;
}

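/* Set up the flow table at init time; a missing FOE table means the
 * PPE is not available, which is not an error.
 */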
int mtk_eth_offload_init(struct mtk_eth *eth)
{
        if (!eth->ppe.foe_table)
                return 0;

        return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}