// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dst_metadata.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
        .head_offset = offsetof(struct mtk_flow_entry, l2_node),
        .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
        .key_len = offsetof(struct mtk_foe_bridge, key_end),
        .automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
        writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
        return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
        u32 val;

        val = ppe_r32(ppe, reg);
        val &= ~mask;
        val |= set;
        ppe_w32(ppe, reg, val);

        return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
        return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
        return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
        return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
        int ret;
        u32 val;

        ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
                                 !(val & MTK_PPE_GLO_CFG_BUSY),
                                 20, MTK_PPE_WAIT_TIMEOUT_US);

        if (ret)
                dev_err(ppe->dev, "PPE table busy");

        return ret;
}

static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
{
        int ret;
        u32 val;

        ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
                                 !(val & MTK_PPE_MIB_SER_CR_ST),
                                 20, MTK_PPE_WAIT_TIMEOUT_US);

        if (ret)
                dev_err(ppe->dev, "MIB table busy");

        return ret;
}

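/* Read the hardware byte and packet counters for FOE entry @index via the
 * MIB serial read interface; the results come back split across the
 * MTK_PPE_MIB_SER_R0..R2 registers.
 */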
static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
{
        u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
        u32 val, cnt_r0, cnt_r1, cnt_r2;
        int ret;

        val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
        ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);

        ret = mtk_ppe_mib_wait_busy(ppe);
        if (ret)
                return ret;

        cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
        cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
        cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);

        byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
        byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
        pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
        pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
        *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
        *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;

        return 0;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
        ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
        ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
        mtk_ppe_cache_clear(ppe);

        ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
                enable * MTK_PPE_CACHE_CTL_EN);
}

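/* Compute the FOE table bucket index for an entry. This replicates the hash
 * the PPE applies to the flow tuple, so software flows can be filed under the
 * bucket that the hardware will report for matching packets.
 */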
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
        u32 hv1, hv2, hv3;
        u32 hash;

        switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
                case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
                case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
                        hv1 = e->ipv4.orig.ports;
                        hv2 = e->ipv4.orig.dest_ip;
                        hv3 = e->ipv4.orig.src_ip;
                        break;
                case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
                case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
                        hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
                        hv1 ^= e->ipv6.ports;

                        hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
                        hv2 ^= e->ipv6.dest_ip[0];

                        hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
                        hv3 ^= e->ipv6.src_ip[0];
                        break;
                case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
                case MTK_PPE_PKT_TYPE_IPV6_6RD:
                default:
                        WARN_ON_ONCE(1);
                        return MTK_PPE_HASH_MASK;
        }

        hash = (hv1 & hv2) | ((~hv1) & hv3);
        hash = (hash >> 24) | ((hash & 0xffffff) << 8);
        hash ^= hv1 ^ hv2 ^ hv3;
        hash ^= hash >> 16;
        hash <<= (ffs(eth->soc->hash_offset) - 1);
        hash &= MTK_PPE_ENTRIES - 1;

        return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
        int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
                return &entry->bridge.l2;

        if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                return &entry->ipv6.l2;

        return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
        int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
                return &entry->bridge.ib2;

        if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                return &entry->ipv6.ib2;

        return &entry->ipv4.ib2;
}

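/* Initialize a FOE entry for the given packet type and L4 protocol: set up the
 * ib1/ib2 control words, the destination PSE port and the L2 rewrite info
 * (MAC addresses and ethertype).
 */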
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
                          int type, int l4proto, u8 pse_port, u8 *src_mac,
                          u8 *dest_mac)
{
        struct mtk_foe_mac_info *l2;
        u32 ports_pad, val;

        memset(entry, 0, sizeof(*entry));

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
                val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
                      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
                      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
                      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
                entry->ib1 = val;

                val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
                      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
        } else {
                int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

                val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
                      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
                      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
                      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
                entry->ib1 = val;

                val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
                      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
                      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
        }

        if (is_multicast_ether_addr(dest_mac))
                val |= mtk_get_ib2_multicast_mask(eth);

        ports_pad = 0xa5a5a500 | (l4proto & 0xff);
        if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
                entry->ipv4.orig.ports = ports_pad;
        if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
                entry->ipv6.ports = ports_pad;

        if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
                ether_addr_copy(entry->bridge.src_mac, src_mac);
                ether_addr_copy(entry->bridge.dest_mac, dest_mac);
                entry->bridge.ib2 = val;
                l2 = &entry->bridge.l2;
        } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
                entry->ipv6.ib2 = val;
                l2 = &entry->ipv6.l2;
        } else {
                entry->ipv4.ib2 = val;
                l2 = &entry->ipv4.l2;
        }

        l2->dest_mac_hi = get_unaligned_be32(dest_mac);
        l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
        l2->src_mac_hi = get_unaligned_be32(src_mac);
        l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

        if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
                l2->etype = ETH_P_IPV6;
        else
                l2->etype = ETH_P_IP;

        return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
                               struct mtk_foe_entry *entry, u8 port)
{
        u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
        u32 val = *ib2;

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
                val &= ~MTK_FOE_IB2_DEST_PORT_V2;
                val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
        } else {
                val &= ~MTK_FOE_IB2_DEST_PORT;
                val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
        }
        *ib2 = val;

        return 0;
}

int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
                                 struct mtk_foe_entry *entry, bool egress,
                                 __be32 src_addr, __be16 src_port,
                                 __be32 dest_addr, __be16 dest_port)
{
        int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
        struct mtk_ipv4_tuple *t;

        switch (type) {
        case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
                if (egress) {
                        t = &entry->ipv4.new;
                        break;
                }
                fallthrough;
        case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
        case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
                t = &entry->ipv4.orig;
                break;
        case MTK_PPE_PKT_TYPE_IPV6_6RD:
                entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
                entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
                return 0;
        default:
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        t->src_ip = be32_to_cpu(src_addr);
        t->dest_ip = be32_to_cpu(dest_addr);

        if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
                return 0;

        t->src_port = be16_to_cpu(src_port);
        t->dest_port = be16_to_cpu(dest_port);

        return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
                                 struct mtk_foe_entry *entry,
                                 __be32 *src_addr, __be16 src_port,
                                 __be32 *dest_addr, __be16 dest_port)
{
        int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
        u32 *src, *dest;
        int i;

        switch (type) {
        case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
                src = entry->dslite.tunnel_src_ip;
                dest = entry->dslite.tunnel_dest_ip;
                break;
        case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
        case MTK_PPE_PKT_TYPE_IPV6_6RD:
                entry->ipv6.src_port = be16_to_cpu(src_port);
                entry->ipv6.dest_port = be16_to_cpu(dest_port);
                fallthrough;
        case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
                src = entry->ipv6.src_ip;
                dest = entry->ipv6.dest_ip;
                break;
        default:
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        for (i = 0; i < 4; i++)
                src[i] = be32_to_cpu(src_addr[i]);
        for (i = 0; i < 4; i++)
                dest[i] = be32_to_cpu(dest_addr[i]);

        return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
                          int port)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

        l2->etype = BIT(port);

        if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
                entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
        else
                l2->etype |= BIT(8);

        entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

        return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
                           int vid)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

        switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
        case 0:
                entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
                              mtk_prep_ib1_vlan_layer(eth, 1);
                l2->vlan1 = vid;
                return 0;
        case 1:
                if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
                        l2->vlan1 = vid;
                        l2->etype |= BIT(8);
                } else {
                        l2->vlan2 = vid;
                        entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
                }
                return 0;
        default:
                return -ENOSPC;
        }
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
                            int sid)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

        if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
            (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
                l2->etype = ETH_P_PPP_SES;

        entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
        l2->pppoe_id = sid;

        return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
                           int wdma_idx, int txq, int bss, int wcid)
{
        struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
        u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
                *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
                *ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
                         MTK_FOE_IB2_WDMA_WINFO_V2;
                l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
                            FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
        } else {
                *ib2 &= ~MTK_FOE_IB2_PORT_MG;
                *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
                if (wdma_idx)
                        *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
                l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
                            FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
                            FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
        }

        return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
                            unsigned int queue)
{
        u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
                *ib2 &= ~MTK_FOE_IB2_QID_V2;
                *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
                *ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
        } else {
                *ib2 &= ~MTK_FOE_IB2_QID;
                *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
                *ib2 |= MTK_FOE_IB2_PSE_QOS;
        }

        return 0;
}

static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
                     struct mtk_foe_entry *data)
{
        int type, len;

        if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
                return false;

        type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
        if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                len = offsetof(struct mtk_foe_entry, ipv6._rsv);
        else
                len = offsetof(struct mtk_foe_entry, ipv4.ib2);

        return !memcmp(&entry->data.data, &data->data, len - 4);
}

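/* Invalidate the hardware entry (if any) behind a software flow and unlink the
 * flow. For an L2 flow, all of its subflows are cleared as well; subflow
 * entries are freed here, since they are only reachable through their base
 * flow.
 */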
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        struct hlist_head *head;
        struct hlist_node *tmp;

        if (entry->type == MTK_FLOW_TYPE_L2) {
                rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
                                       mtk_flow_l2_ht_params);

                head = &entry->l2_flows;
                hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
                        __mtk_foe_entry_clear(ppe, entry);
                return;
        }

        hlist_del_init(&entry->list);
        if (entry->hash != 0xffff) {
                struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

                hwe->ib1 &= ~MTK_FOE_IB1_STATE;
                hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
                dma_wmb();
                mtk_ppe_cache_clear(ppe);

                if (ppe->accounting) {
                        struct mtk_foe_accounting *acct;

                        acct = ppe->acct_table + entry->hash * sizeof(*acct);
                        acct->packets = 0;
                        acct->bytes = 0;
                }
        }
        entry->hash = 0xffff;

        if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
                return;

        hlist_del_init(&entry->l2_data.list);
        kfree(entry);
}

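/* Return how long an entry has been idle, in hardware timestamp units,
 * taking wraparound of the timestamp counter into account.
 */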
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
        u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
        u16 now = mtk_eth_timestamp(ppe->eth);
        u16 timestamp = ib1 & ib1_ts_mask;

        if (timestamp > now)
                return ib1_ts_mask + 1 - timestamp + now;
        else
                return now - timestamp;
}

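/* Refresh the timestamp of an L2 flow from its most recently used subflow and
 * drop subflows whose hardware entry is no longer in the BIND state.
 */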
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
        struct mtk_flow_entry *cur;
        struct mtk_foe_entry *hwe;
        struct hlist_node *tmp;
        int idle;

        idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
        hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
                int cur_idle;
                u32 ib1;

                hwe = mtk_foe_get_entry(ppe, cur->hash);
                ib1 = READ_ONCE(hwe->ib1);

                if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
                        cur->hash = 0xffff;
                        __mtk_foe_entry_clear(ppe, cur);
                        continue;
                }

                cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
                if (cur_idle >= idle)
                        continue;

                idle = cur_idle;
                entry->data.ib1 &= ~ib1_ts_mask;
                entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
        }
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        struct mtk_foe_entry foe = {};
        struct mtk_foe_entry *hwe;

        spin_lock_bh(&ppe_lock);

        if (entry->type == MTK_FLOW_TYPE_L2) {
                mtk_flow_entry_update_l2(ppe, entry);
                goto out;
        }

        if (entry->hash == 0xffff)
                goto out;

        hwe = mtk_foe_get_entry(ppe, entry->hash);
        memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
        if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
                entry->hash = 0xffff;
                goto out;
        }

        entry->data.ib1 = foe.ib1;

out:
        spin_unlock_bh(&ppe_lock);
}

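/* Write a prepared entry into the hardware FOE table at @hash. The data words
 * are written before ib1, so the entry only becomes valid (BIND state) once
 * its contents are in place.
 */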
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
                       u16 hash)
{
        struct mtk_eth *eth = ppe->eth;
        u16 timestamp = mtk_eth_timestamp(eth);
        struct mtk_foe_entry *hwe;
        u32 val;

        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
                entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
                entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
                                         timestamp);
        } else {
                entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
                entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
                                         timestamp);
        }

        hwe = mtk_foe_get_entry(ppe, hash);
        memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
        wmb();
        hwe->ib1 = entry->ib1;

        if (ppe->accounting) {
                if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
                        val = MTK_FOE_IB2_MIB_CNT_V2;
                else
                        val = MTK_FOE_IB2_MIB_CNT;
                *mtk_foe_entry_ib2(eth, hwe) |= val;
        }

        dma_wmb();

        mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        spin_lock_bh(&ppe_lock);
        __mtk_foe_entry_clear(ppe, entry);
        spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        struct mtk_flow_entry *prev;

        entry->type = MTK_FLOW_TYPE_L2;

        prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
                                                 mtk_flow_l2_ht_params);
        if (likely(!prev))
                return 0;

        if (IS_ERR(prev))
                return PTR_ERR(prev);

        return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
                                       &entry->l2_node, mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        const struct mtk_soc_data *soc = ppe->eth->soc;
        int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
        u32 hash;

        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
                return mtk_foe_entry_commit_l2(ppe, entry);

        hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
        entry->hash = 0xffff;
        spin_lock_bh(&ppe_lock);
        hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
        spin_unlock_bh(&ppe_lock);

        return 0;
}

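/* Create a hardware entry for one hash bucket of an L2 (bridge) flow: keep the
 * L3/L4 fields of the unbound hardware entry, take the L2 rewrite info and ib2
 * from the software flow, and commit the result as a subflow of @entry.
 */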
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
                             u16 hash)
{
        const struct mtk_soc_data *soc = ppe->eth->soc;
        struct mtk_flow_entry *flow_info;
        struct mtk_foe_entry foe = {}, *hwe;
        struct mtk_foe_mac_info *l2;
        u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
        int type;

        flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
        if (!flow_info)
                return;

        flow_info->l2_data.base_flow = entry;
        flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
        flow_info->hash = hash;
        hlist_add_head(&flow_info->list,
                       &ppe->foe_flow[hash / soc->hash_offset]);
        hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

        hwe = mtk_foe_get_entry(ppe, hash);
        memcpy(&foe, hwe, soc->foe_entry_size);
        foe.ib1 &= ib1_mask;
        foe.ib1 |= entry->data.ib1 & ~ib1_mask;

        l2 = mtk_foe_entry_l2(ppe->eth, &foe);
        memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

        type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
        if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
                memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
        else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
                l2->etype = ETH_P_IPV6;

        *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

        __mtk_foe_entry_commit(ppe, &foe, hash);
}

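/* Called from the RX path for packets whose FOE entry is still unbound: look
 * for a matching software flow in this hash bucket (or an L2 bridge flow keyed
 * by MAC addresses and VLAN) and bind it to the hardware entry at @hash.
 */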
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
        const struct mtk_soc_data *soc = ppe->eth->soc;
        struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
        struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
        struct mtk_flow_entry *entry;
        struct mtk_foe_bridge key = {};
        struct hlist_node *n;
        struct ethhdr *eh;
        bool found = false;
        u8 *tag;

        spin_lock_bh(&ppe_lock);

        if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
                goto out;

        hlist_for_each_entry_safe(entry, n, head, list) {
                if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
                        if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
                                     MTK_FOE_STATE_BIND))
                                continue;

                        entry->hash = 0xffff;
                        __mtk_foe_entry_clear(ppe, entry);
                        continue;
                }

                if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
                        if (entry->hash != 0xffff)
                                entry->hash = 0xffff;
                        continue;
                }

                entry->hash = hash;
                __mtk_foe_entry_commit(ppe, &entry->data, hash);
                found = true;
        }

        if (found)
                goto out;

        eh = eth_hdr(skb);
        ether_addr_copy(key.dest_mac, eh->h_dest);
        ether_addr_copy(key.src_mac, eh->h_source);
        tag = skb->data - 2;
        key.vlan = 0;
        switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
        case htons(ETH_P_XDSA):
                if (!netdev_uses_dsa(skb->dev) ||
                    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
                        goto out;

                if (!skb_metadata_dst(skb))
                        tag += 4;

                if (get_unaligned_be16(tag) != ETH_P_8021Q)
                        break;

                fallthrough;
#endif
        case htons(ETH_P_8021Q):
                key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
                break;
        default:
                break;
        }

        entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
        if (!entry)
                goto out;

        mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
        spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
        mtk_flow_entry_update(ppe, entry);

        return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
        if (!ppe)
                return -EINVAL;

        /* disable KA */
        ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
        ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
        ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
        usleep_range(10000, 11000);

        /* set KA timer to maximum */
        ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
        ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

        /* set KA tick select */
        ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
        ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
        usleep_range(10000, 11000);

        /* disable scan mode */
        ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
        usleep_range(10000, 11000);

        return mtk_ppe_wait_busy(ppe);
}

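/* Read the hardware MIB counters for @index, accumulate them into the software
 * accounting entry (the hardware counters are configured as clear-on-read) and
 * optionally report the per-call delta through @diff.
 */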
struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
                                                 struct mtk_foe_accounting *diff)
{
        struct mtk_foe_accounting *acct;
        int size = sizeof(struct mtk_foe_accounting);
        u64 bytes, packets;

        if (!ppe->accounting)
                return NULL;

        if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
                return NULL;

        acct = ppe->acct_table + index * size;

        acct->bytes += bytes;
        acct->packets += packets;

        if (diff) {
                diff->bytes = bytes;
                diff->packets = packets;
        }

        return acct;
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
{
        bool accounting = eth->soc->has_accounting;
        const struct mtk_soc_data *soc = eth->soc;
        struct mtk_foe_accounting *acct;
        struct device *dev = eth->dev;
        struct mtk_mib_entry *mib;
        struct mtk_ppe *ppe;
        u32 foe_flow_size;
        void *foe;

        ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
        if (!ppe)
                return NULL;

        rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

        /* need to allocate a separate device, since the PPE DMA access is
         * not coherent.
         */
        ppe->base = base;
        ppe->eth = eth;
        ppe->dev = dev;
        ppe->version = eth->soc->offload_version;
        ppe->accounting = accounting;

        foe = dmam_alloc_coherent(ppe->dev,
                                  MTK_PPE_ENTRIES * soc->foe_entry_size,
                                  &ppe->foe_phys, GFP_KERNEL);
        if (!foe)
                goto err_free_l2_flows;

        ppe->foe_table = foe;

        foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
                        sizeof(*ppe->foe_flow);
        ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
        if (!ppe->foe_flow)
                goto err_free_l2_flows;

        if (accounting) {
                mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
                                          &ppe->mib_phys, GFP_KERNEL);
                if (!mib)
                        goto err_free_l2_flows;

                ppe->mib_table = mib;

                acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
                                    GFP_KERNEL);

                if (!acct)
                        goto err_free_l2_flows;

                ppe->acct_table = acct;
        }

        mtk_ppe_debugfs_init(ppe, index);

        return ppe;

err_free_l2_flows:
        rhashtable_destroy(&ppe->l2_flows);
        return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
                if (!eth->ppe[i])
                        return;
                rhashtable_destroy(&eth->ppe[i]->l2_flows);
        }
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
        static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
        int i, k;

        memset(ppe->foe_table, 0,
               MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

        if (!IS_ENABLED(CONFIG_SOC_MT7621))
                return;

        /* skip all entries that cross the 1024 byte boundary */
        for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
                for (k = 0; k < ARRAY_SIZE(skip); k++) {
                        struct mtk_foe_entry *hwe;

                        hwe = mtk_foe_get_entry(ppe, i + skip[k]);
                        hwe->ib1 |= MTK_FOE_IB1_STATIC;
                }
        }
}

void mtk_ppe_start(struct mtk_ppe *ppe)
{
        u32 val;

        if (!ppe)
                return;

        mtk_ppe_init_foe_table(ppe);
        ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

        val = MTK_PPE_TB_CFG_ENTRY_80B |
              MTK_PPE_TB_CFG_AGE_NON_L4 |
              MTK_PPE_TB_CFG_AGE_UNBIND |
              MTK_PPE_TB_CFG_AGE_TCP |
              MTK_PPE_TB_CFG_AGE_UDP |
              MTK_PPE_TB_CFG_AGE_TCP_FIN |
              FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
                         MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
              FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
                         MTK_PPE_KEEPALIVE_DISABLE) |
              FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
              FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
                         MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
              FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
                         MTK_PPE_ENTRIES_SHIFT);
        if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
                val |= MTK_PPE_TB_CFG_INFO_SEL;
        ppe_w32(ppe, MTK_PPE_TB_CFG, val);

        ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
                MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

        mtk_ppe_cache_enable(ppe, true);

        val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
              MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
              MTK_PPE_FLOW_CFG_IP6_6RD |
              MTK_PPE_FLOW_CFG_IP4_NAT |
              MTK_PPE_FLOW_CFG_IP4_NAPT |
              MTK_PPE_FLOW_CFG_IP4_DSLITE |
              MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
        if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
                val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
                       MTK_PPE_MD_TOAP_BYP_CRSN1 |
                       MTK_PPE_MD_TOAP_BYP_CRSN2 |
                       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
        else
                val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
                       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
        ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

        val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
              FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
        ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

        val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
              FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
        ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

        val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
              FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
        ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

        val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
        ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

        val = MTK_PPE_BIND_LIMIT1_FULL |
              FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
        ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

        val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
              FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
        ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

        /* enable PPE */
        val = MTK_PPE_GLO_CFG_EN |
              MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
              MTK_PPE_GLO_CFG_IP4_CS_DROP |
              MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
        ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

        ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

        if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
                ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
                ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
        }

        if (ppe->accounting && ppe->mib_phys) {
                ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
                ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
                        MTK_PPE_MIB_CFG_EN);
                ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
                        MTK_PPE_MIB_CFG_RD_CLR);
                ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
                        MTK_PPE_MIB_CFG_RD_CLR);
        }
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
        u32 val;
        int i;

        if (!ppe)
                return 0;

        for (i = 0; i < MTK_PPE_ENTRIES; i++) {
                struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

                hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
                                      MTK_FOE_STATE_INVALID);
        }

        mtk_ppe_cache_enable(ppe, false);

        /* disable offload engine */
        ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
        ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

        /* disable aging */
        val = MTK_PPE_TB_CFG_AGE_NON_L4 |
              MTK_PPE_TB_CFG_AGE_UNBIND |
              MTK_PPE_TB_CFG_AGE_TCP |
              MTK_PPE_TB_CFG_AGE_UDP |
              MTK_PPE_TB_CFG_AGE_TCP_FIN;
        ppe_clear(ppe, MTK_PPE_TB_CFG, val);

        return mtk_ppe_wait_busy(ppe);
}