1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
5 #include "ice_switch.h"
/* Byte offsets into the dummy Ethernet header below, plus related limits */
7 #define ICE_ETH_DA_OFFSET 0 /* destination MAC address */
8 #define ICE_ETH_ETHTYPE_OFFSET 12 /* EtherType field (header without VLAN tag) */
9 #define ICE_ETH_VLAN_TCI_OFFSET 14 /* VLAN TCI when bytes 12-13 are 0x8100 */
10 #define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
11 #define ICE_IPV6_ETHER_ID 0x86DD /* EtherType for IPv6 */
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14 * struct to configure any switch filter rules.
15 * {DA (6 bytes), SA(6 bytes),
16 * Ether type (2 bytes for header without VLAN tag) OR
17 * VLAN tag (4 bytes for header with VLAN tag) }
19 * A word on the hardcoded values:
20 * byte 0 = 0x2: identifies the DA MAC as locally administered
21 * byte 6 = 0x2: identifies the SA MAC as locally administered
22 * byte 12 = 0x81 & byte 13 = 0x00:
23 * In the case of a VLAN filter, the first two bytes define the ether type (0x8100)
24 * and the remaining two bytes are a placeholder for programming a given VLAN ID
25 * In the case of an Ether type filter, the header is treated as having no VLAN tag
26 * and bytes 12 and 13 are used to program a given Ether type instead
28 #define DUMMY_ETH_HDR_LEN 16 /* DA(6) + SA(6) + VLAN tag or EtherType placeholder(4) */
29 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34 ICE_PKT_OUTER_IPV6 = BIT(0),
35 ICE_PKT_TUN_GTPC = BIT(1),
36 ICE_PKT_TUN_GTPU = BIT(2),
37 ICE_PKT_TUN_NVGRE = BIT(3),
38 ICE_PKT_TUN_UDP = BIT(4),
39 ICE_PKT_INNER_IPV6 = BIT(5),
40 ICE_PKT_INNER_TCP = BIT(6),
41 ICE_PKT_INNER_UDP = BIT(7),
42 ICE_PKT_GTP_NOPAY = BIT(8),
43 ICE_PKT_KMALLOC = BIT(9),
44 ICE_PKT_PPPOE = BIT(10),
47 struct ice_dummy_pkt_offsets {
48 enum ice_protocol_type type;
49 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
52 struct ice_dummy_pkt_profile {
53 const struct ice_dummy_pkt_offsets *offsets;
/* Declare the protocol-offset table for a dummy packet of the given type.
 * Each entry pairs an ice_protocol_type with its byte offset inside the
 * matching ice_dummy_<type>_packet template.
 */
60 #define ICE_DECLARE_PKT_OFFSETS(type) \
61 static const struct ice_dummy_pkt_offsets \
62 ice_dummy_##type##_packet_offsets[]
/* Declare the raw byte template for a dummy packet of the given type */
64 #define ICE_DECLARE_PKT_TEMPLATE(type) \
65 static const u8 ice_dummy_##type##_packet[]
67 #define ICE_PKT_PROFILE(type, m) { \
69 .pkt = ice_dummy_##type##_packet, \
70 .pkt_len = sizeof(ice_dummy_##type##_packet), \
71 .offsets = ice_dummy_##type##_packet_offsets, \
72 .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
75 ICE_DECLARE_PKT_OFFSETS(vlan) = {
76 { ICE_VLAN_OFOS, 12 },
79 ICE_DECLARE_PKT_TEMPLATE(vlan) = {
80 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
83 ICE_DECLARE_PKT_OFFSETS(qinq) = {
88 ICE_DECLARE_PKT_TEMPLATE(qinq) = {
89 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
90 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
93 ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
96 { ICE_IPV4_OFOS, 14 },
102 { ICE_PROTOCOL_LAST, 0 },
105 ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
106 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x08, 0x00, /* ICE_ETYPE_OL 12 */
112 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
113 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x2F, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
118 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
119 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_IL 54 */
127 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x06, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
134 0x00, 0x00, 0x00, 0x00,
135 0x00, 0x00, 0x00, 0x00,
136 0x50, 0x02, 0x20, 0x00,
137 0x00, 0x00, 0x00, 0x00
140 ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
142 { ICE_ETYPE_OL, 12 },
143 { ICE_IPV4_OFOS, 14 },
146 { ICE_ETYPE_IL, 54 },
148 { ICE_UDP_ILOS, 76 },
149 { ICE_PROTOCOL_LAST, 0 },
152 ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
153 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x08, 0x00, /* ICE_ETYPE_OL 12 */
159 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
160 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x2F, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
165 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
166 0x00, 0x00, 0x00, 0x00,
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_IL 54 */
174 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
181 0x00, 0x08, 0x00, 0x00,
184 ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
186 { ICE_ETYPE_OL, 12 },
187 { ICE_IPV4_OFOS, 14 },
191 { ICE_VXLAN_GPE, 42 },
193 { ICE_ETYPE_IL, 62 },
196 { ICE_PROTOCOL_LAST, 0 },
199 ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
200 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
201 0x00, 0x00, 0x00, 0x00,
202 0x00, 0x00, 0x00, 0x00,
204 0x08, 0x00, /* ICE_ETYPE_OL 12 */
206 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
207 0x00, 0x01, 0x00, 0x00,
208 0x40, 0x11, 0x00, 0x00,
209 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
213 0x00, 0x46, 0x00, 0x00,
215 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
216 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_IL 62 */
224 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
225 0x00, 0x01, 0x00, 0x00,
226 0x40, 0x06, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
231 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x00, 0x00,
233 0x50, 0x02, 0x20, 0x00,
234 0x00, 0x00, 0x00, 0x00
237 ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
239 { ICE_ETYPE_OL, 12 },
240 { ICE_IPV4_OFOS, 14 },
244 { ICE_VXLAN_GPE, 42 },
246 { ICE_ETYPE_IL, 62 },
248 { ICE_UDP_ILOS, 84 },
249 { ICE_PROTOCOL_LAST, 0 },
252 ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
253 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
254 0x00, 0x00, 0x00, 0x00,
255 0x00, 0x00, 0x00, 0x00,
257 0x08, 0x00, /* ICE_ETYPE_OL 12 */
259 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
260 0x00, 0x01, 0x00, 0x00,
261 0x00, 0x11, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00,
265 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
266 0x00, 0x3a, 0x00, 0x00,
268 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
269 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
272 0x00, 0x00, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00,
275 0x08, 0x00, /* ICE_ETYPE_IL 62 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
284 0x00, 0x08, 0x00, 0x00,
287 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
289 { ICE_ETYPE_OL, 12 },
290 { ICE_IPV4_OFOS, 14 },
293 { ICE_ETYPE_IL, 54 },
296 { ICE_PROTOCOL_LAST, 0 },
299 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x2F, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
313 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
316 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
321 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
322 0x00, 0x08, 0x06, 0x40,
323 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x50, 0x02, 0x20, 0x00,
336 0x00, 0x00, 0x00, 0x00
339 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
341 { ICE_ETYPE_OL, 12 },
342 { ICE_IPV4_OFOS, 14 },
345 { ICE_ETYPE_IL, 54 },
347 { ICE_UDP_ILOS, 96 },
348 { ICE_PROTOCOL_LAST, 0 },
351 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
352 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
353 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, 0x00, 0x00,
356 0x08, 0x00, /* ICE_ETYPE_OL 12 */
358 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
359 0x00, 0x00, 0x00, 0x00,
360 0x00, 0x2F, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
365 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
371 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
373 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
374 0x00, 0x08, 0x11, 0x40,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
385 0x00, 0x08, 0x00, 0x00,
388 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
390 { ICE_ETYPE_OL, 12 },
391 { ICE_IPV4_OFOS, 14 },
395 { ICE_VXLAN_GPE, 42 },
397 { ICE_ETYPE_IL, 62 },
400 { ICE_PROTOCOL_LAST, 0 },
403 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x08, 0x00, /* ICE_ETYPE_OL 12 */
410 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
411 0x00, 0x01, 0x00, 0x00,
412 0x40, 0x11, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
417 0x00, 0x5a, 0x00, 0x00,
419 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
426 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
428 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
429 0x00, 0x08, 0x06, 0x40,
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442 0x50, 0x02, 0x20, 0x00,
443 0x00, 0x00, 0x00, 0x00
446 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
448 { ICE_ETYPE_OL, 12 },
449 { ICE_IPV4_OFOS, 14 },
453 { ICE_VXLAN_GPE, 42 },
455 { ICE_ETYPE_IL, 62 },
457 { ICE_UDP_ILOS, 104 },
458 { ICE_PROTOCOL_LAST, 0 },
461 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
462 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
463 0x00, 0x00, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00,
466 0x08, 0x00, /* ICE_ETYPE_OL 12 */
468 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
469 0x00, 0x01, 0x00, 0x00,
470 0x00, 0x11, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
475 0x00, 0x4e, 0x00, 0x00,
477 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
478 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
481 0x00, 0x00, 0x00, 0x00,
482 0x00, 0x00, 0x00, 0x00,
484 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
486 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
487 0x00, 0x08, 0x11, 0x40,
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
498 0x00, 0x08, 0x00, 0x00,
501 /* offset info for MAC + IPv4 + UDP dummy packet */
502 ICE_DECLARE_PKT_OFFSETS(udp) = {
504 { ICE_ETYPE_OL, 12 },
505 { ICE_IPV4_OFOS, 14 },
506 { ICE_UDP_ILOS, 34 },
507 { ICE_PROTOCOL_LAST, 0 },
510 /* Dummy packet for MAC + IPv4 + UDP */
511 ICE_DECLARE_PKT_TEMPLATE(udp) = {
512 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
516 0x08, 0x00, /* ICE_ETYPE_OL 12 */
518 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
519 0x00, 0x01, 0x00, 0x00,
520 0x00, 0x11, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
525 0x00, 0x08, 0x00, 0x00,
527 0x00, 0x00, /* 2 bytes for 4 byte alignment */
530 /* offset info for MAC + IPv4 + TCP dummy packet */
531 ICE_DECLARE_PKT_OFFSETS(tcp) = {
533 { ICE_ETYPE_OL, 12 },
534 { ICE_IPV4_OFOS, 14 },
536 { ICE_PROTOCOL_LAST, 0 },
539 /* Dummy packet for MAC + IPv4 + TCP */
540 ICE_DECLARE_PKT_TEMPLATE(tcp) = {
541 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
545 0x08, 0x00, /* ICE_ETYPE_OL 12 */
547 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
548 0x00, 0x01, 0x00, 0x00,
549 0x00, 0x06, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
554 0x00, 0x00, 0x00, 0x00,
555 0x00, 0x00, 0x00, 0x00,
556 0x50, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
559 0x00, 0x00, /* 2 bytes for 4 byte alignment */
562 ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
564 { ICE_ETYPE_OL, 12 },
565 { ICE_IPV6_OFOS, 14 },
567 { ICE_PROTOCOL_LAST, 0 },
570 ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
571 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
572 0x00, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
575 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
577 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
578 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
579 0x00, 0x00, 0x00, 0x00,
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
588 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, 0x00, 0x00,
591 0x50, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
594 0x00, 0x00, /* 2 bytes for 4 byte alignment */
598 ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
600 { ICE_ETYPE_OL, 12 },
601 { ICE_IPV6_OFOS, 14 },
602 { ICE_UDP_ILOS, 54 },
603 { ICE_PROTOCOL_LAST, 0 },
606 /* IPv6 + UDP dummy packet */
607 ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
608 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
612 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
614 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
615 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x00, 0x00,
625 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
626 0x00, 0x10, 0x00, 0x00,
628 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
629 0x00, 0x00, 0x00, 0x00,
631 0x00, 0x00, /* 2 bytes for 4 byte alignment */
634 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
635 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
637 { ICE_IPV4_OFOS, 14 },
642 { ICE_PROTOCOL_LAST, 0 },
645 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
646 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
651 0x45, 0x00, 0x00, 0x58, /* IP 14 */
652 0x00, 0x00, 0x00, 0x00,
653 0x00, 0x11, 0x00, 0x00,
654 0x00, 0x00, 0x00, 0x00,
655 0x00, 0x00, 0x00, 0x00,
657 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
658 0x00, 0x44, 0x00, 0x00,
660 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
661 0x00, 0x00, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x85,
664 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
665 0x00, 0x00, 0x00, 0x00,
667 0x45, 0x00, 0x00, 0x28, /* IP 62 */
668 0x00, 0x00, 0x00, 0x00,
669 0x00, 0x06, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00,
671 0x00, 0x00, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
674 0x00, 0x00, 0x00, 0x00,
675 0x00, 0x00, 0x00, 0x00,
676 0x50, 0x00, 0x00, 0x00,
677 0x00, 0x00, 0x00, 0x00,
679 0x00, 0x00, /* 2 bytes for 4 byte alignment */
682 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
683 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
685 { ICE_IPV4_OFOS, 14 },
689 { ICE_UDP_ILOS, 82 },
690 { ICE_PROTOCOL_LAST, 0 },
693 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
694 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
695 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00,
699 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
700 0x00, 0x00, 0x00, 0x00,
701 0x00, 0x11, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x00,
703 0x00, 0x00, 0x00, 0x00,
705 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
706 0x00, 0x38, 0x00, 0x00,
708 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x85,
712 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
713 0x00, 0x00, 0x00, 0x00,
715 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
716 0x00, 0x00, 0x00, 0x00,
717 0x00, 0x11, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
722 0x00, 0x08, 0x00, 0x00,
724 0x00, 0x00, /* 2 bytes for 4 byte alignment */
727 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
728 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
730 { ICE_IPV4_OFOS, 14 },
735 { ICE_PROTOCOL_LAST, 0 },
738 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
739 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
740 0x00, 0x00, 0x00, 0x00,
741 0x00, 0x00, 0x00, 0x00,
744 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x11, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
751 0x00, 0x58, 0x00, 0x00,
753 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
754 0x00, 0x00, 0x00, 0x00,
755 0x00, 0x00, 0x00, 0x85,
757 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
758 0x00, 0x00, 0x00, 0x00,
760 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
761 0x00, 0x14, 0x06, 0x00,
762 0x00, 0x00, 0x00, 0x00,
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x50, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, /* 2 bytes for 4 byte alignment */
780 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
782 { ICE_IPV4_OFOS, 14 },
786 { ICE_UDP_ILOS, 102 },
787 { ICE_PROTOCOL_LAST, 0 },
790 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
791 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
796 0x45, 0x00, 0x00, 0x60, /* IP 14 */
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x11, 0x00, 0x00,
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
802 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
803 0x00, 0x4c, 0x00, 0x00,
805 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
806 0x00, 0x00, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x85,
809 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
810 0x00, 0x00, 0x00, 0x00,
812 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
813 0x00, 0x08, 0x11, 0x00,
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
824 0x00, 0x08, 0x00, 0x00,
826 0x00, 0x00, /* 2 bytes for 4 byte alignment */
829 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
831 { ICE_IPV6_OFOS, 14 },
836 { ICE_PROTOCOL_LAST, 0 },
839 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
840 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
841 0x00, 0x00, 0x00, 0x00,
842 0x00, 0x00, 0x00, 0x00,
845 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
846 0x00, 0x44, 0x11, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
857 0x00, 0x44, 0x00, 0x00,
859 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
860 0x00, 0x00, 0x00, 0x00,
861 0x00, 0x00, 0x00, 0x85,
863 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
864 0x00, 0x00, 0x00, 0x00,
866 0x45, 0x00, 0x00, 0x28, /* IP 82 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x06, 0x00, 0x00,
869 0x00, 0x00, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x50, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, /* 2 bytes for 4 byte alignment */
881 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
883 { ICE_IPV6_OFOS, 14 },
887 { ICE_UDP_ILOS, 102 },
888 { ICE_PROTOCOL_LAST, 0 },
891 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
892 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
893 0x00, 0x00, 0x00, 0x00,
894 0x00, 0x00, 0x00, 0x00,
897 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
898 0x00, 0x38, 0x11, 0x00,
899 0x00, 0x00, 0x00, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
908 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
909 0x00, 0x38, 0x00, 0x00,
911 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
912 0x00, 0x00, 0x00, 0x00,
913 0x00, 0x00, 0x00, 0x85,
915 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
916 0x00, 0x00, 0x00, 0x00,
918 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
919 0x00, 0x00, 0x00, 0x00,
920 0x00, 0x11, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
925 0x00, 0x08, 0x00, 0x00,
927 0x00, 0x00, /* 2 bytes for 4 byte alignment */
930 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
932 { ICE_IPV6_OFOS, 14 },
937 { ICE_PROTOCOL_LAST, 0 },
940 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
941 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
942 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
946 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
947 0x00, 0x58, 0x11, 0x00,
948 0x00, 0x00, 0x00, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
953 0x00, 0x00, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
957 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
958 0x00, 0x58, 0x00, 0x00,
960 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
961 0x00, 0x00, 0x00, 0x00,
962 0x00, 0x00, 0x00, 0x85,
964 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
965 0x00, 0x00, 0x00, 0x00,
967 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
968 0x00, 0x14, 0x06, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
975 0x00, 0x00, 0x00, 0x00,
976 0x00, 0x00, 0x00, 0x00,
978 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
979 0x00, 0x00, 0x00, 0x00,
980 0x00, 0x00, 0x00, 0x00,
981 0x50, 0x00, 0x00, 0x00,
982 0x00, 0x00, 0x00, 0x00,
984 0x00, 0x00, /* 2 bytes for 4 byte alignment */
987 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
989 { ICE_IPV6_OFOS, 14 },
993 { ICE_UDP_ILOS, 122 },
994 { ICE_PROTOCOL_LAST, 0 },
997 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
998 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, 0x00, 0x00,
1003 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
1004 0x00, 0x4c, 0x11, 0x00,
1005 0x00, 0x00, 0x00, 0x00,
1006 0x00, 0x00, 0x00, 0x00,
1007 0x00, 0x00, 0x00, 0x00,
1008 0x00, 0x00, 0x00, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
1015 0x00, 0x4c, 0x00, 0x00,
1017 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
1018 0x00, 0x00, 0x00, 0x00,
1019 0x00, 0x00, 0x00, 0x85,
1021 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
1022 0x00, 0x00, 0x00, 0x00,
1024 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
1025 0x00, 0x08, 0x11, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1027 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00,
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, 0x00, 0x00,
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x00,
1035 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
1036 0x00, 0x08, 0x00, 0x00,
1038 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1041 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
1042 { ICE_MAC_OFOS, 0 },
1043 { ICE_IPV4_OFOS, 14 },
1045 { ICE_GTP_NO_PAY, 42 },
1046 { ICE_PROTOCOL_LAST, 0 },
1049 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
1050 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1051 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00,
1055 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
1056 0x00, 0x00, 0x40, 0x00,
1057 0x40, 0x11, 0x00, 0x00,
1058 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1061 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1062 0x00, 0x00, 0x00, 0x00,
1064 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1065 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x85,
1068 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1069 0x00, 0x00, 0x00, 0x00,
1071 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
1072 0x00, 0x00, 0x40, 0x00,
1073 0x40, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1079 ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
1080 { ICE_MAC_OFOS, 0 },
1081 { ICE_IPV6_OFOS, 14 },
1083 { ICE_GTP_NO_PAY, 62 },
1084 { ICE_PROTOCOL_LAST, 0 },
1087 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
1088 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1093 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1094 0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
1095 0x00, 0x00, 0x00, 0x00,
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1104 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1105 0x00, 0x00, 0x00, 0x00,
1107 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1108 0x00, 0x00, 0x00, 0x00,
1113 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
1114 { ICE_MAC_OFOS, 0 },
1115 { ICE_ETYPE_OL, 12 },
1117 { ICE_IPV4_OFOS, 22 },
1119 { ICE_PROTOCOL_LAST, 0 },
1122 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
1123 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1124 0x00, 0x00, 0x00, 0x00,
1125 0x00, 0x00, 0x00, 0x00,
1127 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1129 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1132 0x00, 0x21, /* PPP Link Layer 20 */
1134 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1135 0x00, 0x01, 0x00, 0x00,
1136 0x00, 0x06, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1140 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1141 0x00, 0x00, 0x00, 0x00,
1142 0x00, 0x00, 0x00, 0x00,
1143 0x50, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1149 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
1150 { ICE_MAC_OFOS, 0 },
1151 { ICE_ETYPE_OL, 12 },
1153 { ICE_IPV4_OFOS, 22 },
1154 { ICE_UDP_ILOS, 42 },
1155 { ICE_PROTOCOL_LAST, 0 },
1158 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
1159 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1160 0x00, 0x00, 0x00, 0x00,
1161 0x00, 0x00, 0x00, 0x00,
1163 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1165 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1168 0x00, 0x21, /* PPP Link Layer 20 */
1170 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1171 0x00, 0x01, 0x00, 0x00,
1172 0x00, 0x11, 0x00, 0x00,
1173 0x00, 0x00, 0x00, 0x00,
1174 0x00, 0x00, 0x00, 0x00,
1176 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1177 0x00, 0x08, 0x00, 0x00,
1179 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1182 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
1183 { ICE_MAC_OFOS, 0 },
1184 { ICE_ETYPE_OL, 12 },
1186 { ICE_IPV6_OFOS, 22 },
1188 { ICE_PROTOCOL_LAST, 0 },
1191 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
1192 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1193 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00,
1196 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1198 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1201 0x00, 0x57, /* PPP Link Layer 20 */
1203 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1204 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1205 0x00, 0x00, 0x00, 0x00,
1206 0x00, 0x00, 0x00, 0x00,
1207 0x00, 0x00, 0x00, 0x00,
1208 0x00, 0x00, 0x00, 0x00,
1209 0x00, 0x00, 0x00, 0x00,
1210 0x00, 0x00, 0x00, 0x00,
1211 0x00, 0x00, 0x00, 0x00,
1212 0x00, 0x00, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
1215 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00,
1217 0x50, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00,
1220 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1223 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
1224 { ICE_MAC_OFOS, 0 },
1225 { ICE_ETYPE_OL, 12 },
1227 { ICE_IPV6_OFOS, 22 },
1228 { ICE_UDP_ILOS, 62 },
1229 { ICE_PROTOCOL_LAST, 0 },
1232 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
1233 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1234 0x00, 0x00, 0x00, 0x00,
1235 0x00, 0x00, 0x00, 0x00,
1237 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1239 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1242 0x00, 0x57, /* PPP Link Layer 20 */
1244 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1245 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
1246 0x00, 0x00, 0x00, 0x00,
1247 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x00, 0x00,
1249 0x00, 0x00, 0x00, 0x00,
1250 0x00, 0x00, 0x00, 0x00,
1251 0x00, 0x00, 0x00, 0x00,
1252 0x00, 0x00, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1256 0x00, 0x08, 0x00, 0x00,
1258 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1261 static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
1262 ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
1264 ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
1265 ICE_PKT_OUTER_IPV6 |
1266 ICE_PKT_INNER_IPV6 |
1268 ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
1269 ICE_PKT_OUTER_IPV6 |
1270 ICE_PKT_INNER_IPV6),
1271 ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
1272 ICE_PKT_OUTER_IPV6 |
1274 ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
1275 ICE_PKT_OUTER_IPV6),
1276 ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
1277 ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
1278 ICE_PKT_INNER_IPV6 |
1280 ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
1281 ICE_PKT_INNER_IPV6),
1282 ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
1284 ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
1285 ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
1286 ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
1287 ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
1289 ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
1290 ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
1291 ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
1292 ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
1294 ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
1295 ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
1296 ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
1297 ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
1298 ICE_PKT_INNER_IPV6 |
1300 ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
1301 ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
1302 ICE_PKT_INNER_IPV6),
1303 ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
1304 ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
1305 ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
1306 ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
1307 ICE_PKT_PROFILE(tcp, 0),
/* Size in bytes of switch rule struct s with an l-byte hdr_data flexible
 * array appended (struct_size() gives overflow-checked arithmetic).
 */
1310 #define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
/* Rule size when carrying the DUMMY_ETH_HDR_LEN-byte dummy Ethernet header */
1311 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
1312 ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
/* Rule size with no packet header data */
1313 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
1314 ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
/* Size of a large-action rule struct s with n entries in its act[] array */
1315 #define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
/* Size of a VSI-list rule struct s with n entries in its vsi[] array */
1316 #define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
1318 /* this is a recipe to profile association bitmap */
1319 static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1320 ICE_MAX_NUM_PROFILES);
1322 /* this is a profile to recipe association bitmap */
1323 static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1324 ICE_MAX_NUM_RECIPES);
/* NOTE(review): both tables are file-scope caches refreshed from FW by
 * ice_get_recp_to_prof_map(); being static file-scope they are shared across
 * all ice_hw instances handled by this module — confirm single-PF assumptions
 * against the full source.
 */
1327 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1328 * @hw: pointer to the HW struct
1330 * Allocate memory for the entire recipe table and initialize the structures/
1331 * entries corresponding to basic recipes.
1333 int ice_init_def_sw_recp(struct ice_hw *hw)
1335 struct ice_sw_recipe *recps;
/* device-managed allocation: freed automatically on driver detach */
1338 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1339 sizeof(*recps), GFP_KERNEL)
/* NOTE(review): the allocation-failure check is elided in this excerpt —
 * confirm against the full source.
 */
1343 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
/* each entry starts as its own root recipe with empty rule lists */
1344 recps[i].root_rid = i;
1345 INIT_LIST_HEAD(&recps[i].filt_rules);
1346 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1347 INIT_LIST_HEAD(&recps[i].rg_list);
1348 mutex_init(&recps[i].filt_rule_lock);
1351 hw->switch_info->recp_list = recps;
1357 * ice_aq_get_sw_cfg - get switch configuration
1358 * @hw: pointer to the hardware structure
1359 * @buf: pointer to the result buffer
1360 * @buf_size: length of the buffer available for response
1361 * @req_desc: pointer to requested descriptor
1362 * @num_elems: pointer to number of elements
1363 * @cd: pointer to command details structure or NULL
1365 * Get switch configuration (0x0200) to be placed in buf.
1366 * This admin command returns information such as initial VSI/port number
1367 * and switch ID it belongs to.
1369 * NOTE: *req_desc is both an input/output parameter.
1370 * The caller of this function first calls this function with *request_desc set
1371 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1372 * configuration information has been returned; if non-zero (meaning not all
1373 * the information was returned), the caller should call this function again
1374 * with *req_desc set to the previous value returned by f/w to get the
1375 * next block of switch configuration information.
1377 * *num_elems is output only parameter. This reflects the number of elements
1378 * in response buffer. The caller of this function to use *num_elems while
1379 * parsing the response buffer.
1382 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1383 u16 buf_size, u16 *req_desc, u16 *num_elems,
1384 struct ice_sq_cd *cd)
1386 struct ice_aqc_get_sw_cfg *cmd;
1387 struct ice_aq_desc desc;
1390 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1391 cmd = &desc.params.get_sw_conf;
/* continuation cookie from the previous call (0 on the first call) */
1392 cmd->element = cpu_to_le16(*req_desc);
1394 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* echo back FW's continuation cookie and element count to the caller */
1396 *req_desc = le16_to_cpu(cmd->element);
1397 *num_elems = le16_to_cpu(cmd->num_elems);
1405 * @hw: pointer to the HW struct
1406 * @vsi_ctx: pointer to a VSI context struct
1407 * @cd: pointer to command details structure or NULL
1409 * Add a VSI context to the hardware (0x0210)
1412 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1413 struct ice_sq_cd *cd)
1415 struct ice_aqc_add_update_free_vsi_resp *res;
1416 struct ice_aqc_add_get_update_free_vsi *cmd;
1417 struct ice_aq_desc desc;
/* cmd and res alias the same descriptor: cmd is the request view, res the
 * response view FW fills in after completion
 */
1420 cmd = &desc.params.vsi_cmd;
1421 res = &desc.params.add_update_free_vsi_res;
1423 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* when the caller pre-selected a VSI number (not pool-allocated), pass it
 * to FW with the valid bit set
 */
1425 if (!vsi_ctx->alloc_from_pool)
1426 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1427 ICE_AQ_VSI_IS_VALID);
1428 cmd->vf_id = vsi_ctx->vf_num;
1430 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
/* RD flag: this command sends an indirect buffer (vsi_ctx->info) */
1432 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1434 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1435 sizeof(vsi_ctx->info), cd);
/* on success, record the FW-assigned VSI number and pool accounting */
1438 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1439 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1440 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1448 * @hw: pointer to the HW struct
1449 * @vsi_ctx: pointer to a VSI context struct
1450 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1451 * @cd: pointer to command details structure or NULL
1453 * Free VSI context info from hardware (0x0213)
1456 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1457 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1459 struct ice_aqc_add_update_free_vsi_resp *resp;
1460 struct ice_aqc_add_get_update_free_vsi *cmd;
1461 struct ice_aq_desc desc;
1464 cmd = &desc.params.vsi_cmd;
1465 resp = &desc.params.add_update_free_vsi_res;
1467 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1469 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the "if (keep_vsi_alloc)" guard for the next line appears
 * elided in this excerpt — KEEP_ALLOC should only be set when the caller
 * asked for it; confirm against the full source.
 */
1471 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1473 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* on success, refresh the caller's view of pool usage */
1475 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1476 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1484 * @hw: pointer to the HW struct
1485 * @vsi_ctx: pointer to a VSI context struct
1486 * @cd: pointer to command details structure or NULL
1488 * Update VSI context in the hardware (0x0211)
1491 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1492 struct ice_sq_cd *cd)
1494 struct ice_aqc_add_update_free_vsi_resp *resp;
1495 struct ice_aqc_add_get_update_free_vsi *cmd;
1496 struct ice_aq_desc desc;
1499 cmd = &desc.params.vsi_cmd;
1500 resp = &desc.params.add_update_free_vsi_res;
1502 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1504 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the updated VSI properties travel in an indirect buffer */
1506 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1508 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1509 sizeof(vsi_ctx->info), cd);
/* on success, refresh the caller's view of pool usage */
1512 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1513 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1520 * ice_is_vsi_valid - check whether the VSI is valid or not
1521 * @hw: pointer to the HW struct
1522 * @vsi_handle: VSI handle
1524 * check whether the VSI is valid or not
1526 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1528 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1532 * ice_get_hw_vsi_num - return the HW VSI number
1533 * @hw: pointer to the HW struct
1534 * @vsi_handle: VSI handle
1536 * return the HW VSI number
1537 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1539 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1541 return hw->vsi_ctx[vsi_handle]->vsi_num;
1545 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1546 * @hw: pointer to the HW struct
1547 * @vsi_handle: VSI handle
1549 * return the VSI context entry for a given VSI handle
1551 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1553 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1557 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1558 * @hw: pointer to the HW struct
1559 * @vsi_handle: VSI handle
1560 * @vsi: VSI context pointer
1562 * save the VSI context entry for a given VSI handle
1565 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* caller owns @vsi; the table only stores the pointer (no copy is made) */
1567 hw->vsi_ctx[vsi_handle] = vsi;
1571 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1572 * @hw: pointer to the HW struct
1573 * @vsi_handle: VSI handle
1575 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1577 struct ice_vsi_ctx *vsi;
1580 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* free both the LAN and RDMA queue context arrays for every TC, NULLing
 * the pointers so a repeat call is a harmless no-op
 */
1583 ice_for_each_traffic_class(i) {
1584 if (vsi->lan_q_ctx[i]) {
1585 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1586 vsi->lan_q_ctx[i] = NULL;
1588 if (vsi->rdma_q_ctx[i]) {
1589 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1590 vsi->rdma_q_ctx[i] = NULL;
1596 * ice_clear_vsi_ctx - clear the VSI context entry
1597 * @hw: pointer to the HW struct
1598 * @vsi_handle: VSI handle
1600 * clear the VSI context entry
1602 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1604 struct ice_vsi_ctx *vsi;
1606 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* tear down per-TC queue contexts first, then release the context itself
 * and empty the table slot
 */
1608 ice_clear_vsi_q_ctx(hw, vsi_handle);
1609 devm_kfree(ice_hw_to_dev(hw), vsi);
1610 hw->vsi_ctx[vsi_handle] = NULL;
1615 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1616 * @hw: pointer to the HW struct
1618 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* walk every possible handle; ice_clear_vsi_ctx tolerates empty slots */
1622 for (i = 0; i < ICE_MAX_VSI; i++)
1623 ice_clear_vsi_ctx(hw, i);
1627 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1628 * @hw: pointer to the HW struct
1629 * @vsi_handle: unique VSI handle provided by drivers
1630 * @vsi_ctx: pointer to a VSI context struct
1631 * @cd: pointer to command details structure or NULL
1633 * Add a VSI context to the hardware also add it into the VSI handle list.
1634 * If this function gets called after reset for existing VSIs then update
1635 * with the new HW VSI number in the corresponding VSI handle list entry.
1638 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1639 struct ice_sq_cd *cd)
1641 struct ice_vsi_ctx *tmp_vsi_ctx;
/* reject out-of-range handles before touching firmware */
1644 if (vsi_handle >= ICE_MAX_VSI)
1646 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1649 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1651 /* Create a new VSI context */
1652 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1653 sizeof(*tmp_vsi_ctx), GFP_KERNEL);
/* allocation failed: undo the FW-side add so HW and SW stay in sync */
1655 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1658 *tmp_vsi_ctx = *vsi_ctx;
1659 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1661 /* update with new HW VSI num */
1662 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1669 * ice_free_vsi- free VSI context from hardware and VSI handle list
1670 * @hw: pointer to the HW struct
1671 * @vsi_handle: unique VSI handle
1672 * @vsi_ctx: pointer to a VSI context struct
1673 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1674 * @cd: pointer to command details structure or NULL
1676 * Free VSI context info from hardware as well as from VSI handle list
1679 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1680 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1684 if (!ice_is_vsi_valid(hw, vsi_handle))
/* resolve the HW VSI number before asking FW to free it */
1686 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1687 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* drop the SW bookkeeping only after FW confirmed the free */
1689 ice_clear_vsi_ctx(hw, vsi_handle);
1695 * @hw: pointer to the HW struct
1696 * @vsi_handle: unique VSI handle
1697 * @vsi_ctx: pointer to a VSI context struct
1698 * @cd: pointer to command details structure or NULL
1700 * Update VSI context in the hardware
1703 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1704 struct ice_sq_cd *cd)
1706 if (!ice_is_vsi_valid(hw, vsi_handle))
/* translate handle to HW VSI number, then let the AQ wrapper do the rest */
1708 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1709 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1713 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1714 * @hw: pointer to HW struct
1715 * @vsi_handle: VSI SW index
1716 * @enable: boolean for enable/disable
1719 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1721 struct ice_vsi_ctx *ctx;
1723 ctx = ice_get_vsi_ctx(hw, vsi_handle);
/* flip only the protocol-engine filter-enable bit, preserving the other
 * queue option flags, then push the modified context to firmware
 */
1728 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1730 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1732 return ice_update_vsi(hw, vsi_handle, ctx, NULL);
1736 * ice_aq_alloc_free_vsi_list
1737 * @hw: pointer to the HW struct
1738 * @vsi_list_id: VSI list ID returned or used for lookup
1739 * @lkup_type: switch rule filter lookup type
1740 * @opc: switch rules population command type - pass in the command opcode
1742 * allocates or free a VSI list resource
1745 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1746 enum ice_sw_lkup_type lkup_type,
1747 enum ice_adminq_opc opc)
1749 struct ice_aqc_alloc_free_res_elem *sw_buf;
1750 struct ice_aqc_res_elem *vsi_ele;
/* one-element request buffer, sized with struct_size for the trailing array */
1754 buf_len = struct_size(sw_buf, elem, 1);
1755 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1758 sw_buf->num_elems = cpu_to_le16(1);
/* pick the resource type by lookup class: replication lists for forwarding
 * lookups, prune lists for VLAN lookups; anything else is unsupported
 */
1760 if (lkup_type == ICE_SW_LKUP_MAC ||
1761 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1762 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1763 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1764 lkup_type == ICE_SW_LKUP_PROMISC ||
1765 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1766 lkup_type == ICE_SW_LKUP_DFLT) {
1767 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1768 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1770 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1773 goto ice_aq_alloc_free_vsi_list_exit;
/* on free, tell FW which list ID to release */
1776 if (opc == ice_aqc_opc_free_res)
1777 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1779 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1781 goto ice_aq_alloc_free_vsi_list_exit;
/* on alloc, hand the FW-assigned list ID back to the caller */
1783 if (opc == ice_aqc_opc_alloc_res) {
1784 vsi_ele = &sw_buf->elem[0];
1785 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1788 ice_aq_alloc_free_vsi_list_exit:
1789 devm_kfree(ice_hw_to_dev(hw), sw_buf);
1794 * ice_aq_sw_rules - add/update/remove switch rules
1795 * @hw: pointer to the HW struct
1796 * @rule_list: pointer to switch rule population list
1797 * @rule_list_sz: total size of the rule list in bytes
1798 * @num_rules: number of switch rules in the rule_list
1799 * @opc: switch rules population command type - pass in the command opcode
1800 * @cd: pointer to command details structure or NULL
1802 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1805 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1806 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1808 struct ice_aq_desc desc;
/* only the three switch-rule opcodes are valid here */
1811 if (opc != ice_aqc_opc_add_sw_rules &&
1812 opc != ice_aqc_opc_update_sw_rules &&
1813 opc != ice_aqc_opc_remove_sw_rules)
1816 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1818 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1819 desc.params.sw_rules.num_rules_fltr_entry_index =
1820 cpu_to_le16(num_rules);
1821 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* for update/remove, FW reporting ENOENT means the rule does not exist;
 * the elided branch maps that to a distinct return value
 */
1822 if (opc != ice_aqc_opc_add_sw_rules &&
1823 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1830 * ice_aq_add_recipe - add switch recipe
1831 * @hw: pointer to the HW struct
1832 * @s_recipe_list: pointer to switch rule population list
1833 * @num_recipes: number of switch recipes in the list
1834 * @cd: pointer to command details structure or NULL
1839 ice_aq_add_recipe(struct ice_hw *hw,
1840 struct ice_aqc_recipe_data_elem *s_recipe_list,
1841 u16 num_recipes, struct ice_sq_cd *cd)
1843 struct ice_aqc_add_get_recipe *cmd;
1844 struct ice_aq_desc desc;
1847 cmd = &desc.params.add_get_recipe;
1848 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1850 cmd->num_sub_recipes = cpu_to_le16(num_recipes);
/* RD flag: the recipe list travels to FW in an indirect buffer */
1851 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1853 buf_size = num_recipes * sizeof(*s_recipe_list);
1855 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1859 * ice_aq_get_recipe - get switch recipe
1860 * @hw: pointer to the HW struct
1861 * @s_recipe_list: pointer to switch rule population list
1862 * @num_recipes: pointer to the number of recipes (input and output)
1863 * @recipe_root: root recipe number of recipe(s) to retrieve
1864 * @cd: pointer to command details structure or NULL
1868 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1869 * On output, *num_recipes will equal the number of entries returned in
1872 * The caller must supply enough space in s_recipe_list to hold all possible
1873 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1876 ice_aq_get_recipe(struct ice_hw *hw,
1877 struct ice_aqc_recipe_data_elem *s_recipe_list,
1878 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1880 struct ice_aqc_add_get_recipe *cmd;
1881 struct ice_aq_desc desc;
/* enforce the documented contract: full-size buffer only */
1885 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1888 cmd = &desc.params.add_get_recipe;
1889 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1891 cmd->return_index = cpu_to_le16(recipe_root);
1892 cmd->num_sub_recipes = 0;
1894 buf_size = *num_recipes * sizeof(*s_recipe_list);
1896 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* report how many entries FW actually returned */
1897 *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1903 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1904 * @hw: pointer to the HW struct
1905 * @params: parameters used to update the default recipe
1907 * This function only supports updating default recipes and it only supports
1908 * updating a single recipe based on the lkup_idx at a time.
1910 * This is done as a read-modify-write operation. First, get the current recipe
1911 * contents based on the recipe's ID. Then modify the field vector index and
1912 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1913 * the pre-existing recipe with the modifications.
1916 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1917 struct ice_update_recipe_lkup_idx_params *params)
1919 struct ice_aqc_recipe_data_elem *rcp_list;
1920 u16 num_recps = ICE_MAX_NUM_RECIPES;
/* full-size buffer: ice_aq_get_recipe requires ICE_MAX_NUM_RECIPES entries */
1923 rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1927 /* read current recipe list from firmware */
1928 rcp_list->recipe_indx = params->rid;
1929 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
1931 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
1932 params->rid, status);
1936 /* only modify existing recipe's lkup_idx and mask if valid, while
1937 * leaving all other fields the same, then update the recipe firmware
1939 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
1940 if (params->mask_valid)
1941 rcp_list->content.mask[params->lkup_idx] =
1942 cpu_to_le16(params->mask);
1944 if (params->ignore_valid)
1945 rcp_list->content.lkup_indx[params->lkup_idx] |=
1946 ICE_AQ_RECIPE_LKUP_IGNORE;
/* write-back: add-recipe with the same index overwrites the existing one */
1948 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
1950 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
1951 params->rid, params->lkup_idx, params->fv_idx,
1952 params->mask, params->mask_valid ? "true" : "false",
1961 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1962 * @hw: pointer to the HW struct
1963 * @profile_id: package profile ID to associate the recipe with
1964 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1965 * @cd: pointer to command details structure or NULL
1966 * Recipe to profile association (0x0291)
1969 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1970 struct ice_sq_cd *cd)
1972 struct ice_aqc_recipe_to_profile *cmd;
1973 struct ice_aq_desc desc;
1975 cmd = &desc.params.recipe_to_profile;
1976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1977 cmd->profile_id = cpu_to_le16(profile_id);
1978 /* Set the recipe ID bit in the bitmask to let the device know which
1979 * profile we are associating the recipe to
1981 memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
/* direct command: the association bitmap fits inside the descriptor */
1983 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1987 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1988 * @hw: pointer to the HW struct
1989 * @profile_id: package profile ID to associate the recipe with
1990 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1991 * @cd: pointer to command details structure or NULL
1992 * Associate profile ID with given recipe (0x0293)
1995 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1996 struct ice_sq_cd *cd)
1998 struct ice_aqc_recipe_to_profile *cmd;
1999 struct ice_aq_desc desc;
2002 cmd = &desc.params.recipe_to_profile;
2003 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2004 cmd->profile_id = cpu_to_le16(profile_id);
2006 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* on success, copy the association bitmap FW wrote into the descriptor */
2008 memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
2014 * ice_alloc_recipe - add recipe resource
2015 * @hw: pointer to the hardware structure
2016 * @rid: recipe ID returned as response to AQ call
2018 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2020 struct ice_aqc_alloc_free_res_elem *sw_buf;
2024 buf_len = struct_size(sw_buf, elem, 1);
2025 sw_buf = kzalloc(buf_len, GFP_KERNEL);
2029 sw_buf->num_elems = cpu_to_le16(1);
/* shared recipe resource: usable across functions, per the SHARED flag */
2030 sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2031 ICE_AQC_RES_TYPE_S) |
2032 ICE_AQC_RES_TYPE_FLAG_SHARED);
2033 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2034 ice_aqc_opc_alloc_res, NULL);
/* on success, return the FW-assigned recipe ID */
2036 *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2043 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2044 * @hw: pointer to hardware structure
2046 * This function is used to populate recipe_to_profile matrix where index to
2047 * this array is the recipe ID and the element is the mapping of which profiles
2048 * is this recipe mapped to.
2050 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2052 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES)
2055 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
/* reset both cache rows before querying FW for this profile */
2058 bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2059 bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
/* best-effort: a failed query leaves this profile's row empty */
2060 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2062 bitmap_copy(profile_to_recipe[i], r_bitmap,
2063 ICE_MAX_NUM_RECIPES);
/* mirror each association into the inverse (recipe -> profile) table */
2064 for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2065 set_bit(i, recipe_to_profile[j]);
2070 * ice_collect_result_idx - copy result index values
2071 * @buf: buffer that contains the result index
2072 * @recp: the recipe struct to copy data into
2075 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2076 struct ice_sw_recipe *recp)
/* when the recipe publishes a result index, record it (with the enable bit
 * masked off) in the recipe's result-index bitmap
 */
2078 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2079 set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2084 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2085 * @hw: pointer to hardware structure
2086 * @recps: struct that we need to populate
2087 * @rid: recipe ID that we are populating
2088 * @refresh_required: true if we should get recipe to profile mapping from FW
2090 * This function is used to populate all the necessary entries into our
2091 * bookkeeping so that we have a current list of all the recipes that are
2092 * programmed in the firmware.
2095 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2096 bool *refresh_required)
2098 DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
2099 struct ice_aqc_recipe_data_elem *tmp;
2100 u16 num_recps = ICE_MAX_NUM_RECIPES;
2101 struct ice_prot_lkup_ext *lkup_exts;
2106 bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
2108 /* we need a buffer big enough to accommodate all the recipes */
2109 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
2113 tmp[0].recipe_indx = rid;
2114 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2115 /* non-zero status meaning recipe doesn't exist */
2119 /* Get recipe to profile map so that we can get the fv from lkups that
2120 * we read for a recipe from FW. Since we want to minimize the number of
2121 * times we make this FW call, just make one call and cache the copy
2122 * until a new recipe is added. This operation is only required the
2123 * first time to get the changes from FW. Then to search existing
2124 * entries we don't need to update the cache again until another recipe
2127 if (*refresh_required) {
2128 ice_get_recp_to_prof_map(hw);
2129 *refresh_required = false;
2132 /* Start populating all the entries for recps[rid] based on lkups from
2133 * firmware. Note that we are only creating the root recipe in our
2136 lkup_exts = &recps[rid].lkup_exts;
/* walk every sub-recipe FW returned for this root recipe */
2138 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2139 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2140 struct ice_recp_grp_entry *rg_entry;
2141 u8 i, prof, idx, prot = 0;
2145 rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
2152 idx = root_bufs.recipe_indx;
2153 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2155 /* Mark all result indices in this chain */
2156 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2157 set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2160 /* get the first profile that is associated with rid */
2161 prof = find_first_bit(recipe_to_profile[idx],
2162 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped: index i + 1 starts at the first real lookup */
2163 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2164 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2166 rg_entry->fv_idx[i] = lkup_indx;
2167 rg_entry->fv_mask[i] =
2168 le16_to_cpu(root_bufs.content.mask[i + 1]);
2170 /* If the recipe is a chained recipe then all its
2171 * child recipe's result will have a result index.
2172 * To fill fv_words we should not use those result
2173 * index, we only need the protocol ids and offsets.
2174 * We will skip all the fv_idx which stores result
2175 * index in them. We also need to skip any fv_idx which
2176 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2177 * valid offset value.
2179 if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
2180 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2181 rg_entry->fv_idx[i] == 0)
/* resolve the FV index into a (protocol ID, offset) pair */
2184 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2185 rg_entry->fv_idx[i], &prot, &off);
2186 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2187 lkup_exts->fv_words[fv_word_idx].off = off;
2188 lkup_exts->field_mask[fv_word_idx] =
2189 rg_entry->fv_mask[i];
2192 /* populate rg_list with the data from the child entry of this
2195 list_add(&rg_entry->l_entry, &recps[rid].rg_list);
2197 /* Propagate some data to the recipe database */
2198 recps[idx].is_root = !!is_root;
2199 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2200 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2201 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2202 recps[idx].chain_idx = root_bufs.content.result_indx &
2203 ~ICE_AQ_RECIPE_RESULT_EN;
2204 set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2206 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2212 /* Only do the following for root recipes entries */
2213 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2214 sizeof(recps[idx].r_bitmap));
2215 recps[idx].root_rid = root_bufs.content.rid &
2216 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2217 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2220 /* Complete initialization of the root recipe entry */
2221 lkup_exts->n_val_words = fv_word_idx;
2222 recps[rid].big_recp = (num_recps > 1);
2223 recps[rid].n_grp_count = (u8)num_recps;
/* keep a private copy of the raw FW buffers for later re-programming */
2224 recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
2225 recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
2227 if (!recps[rid].root_buf) {
2232 /* Copy result indexes */
2233 bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2234 recps[rid].recp_created = true;
2241 /* ice_init_port_info - Initialize port_info with switch configuration data
2242 * @pi: pointer to port_info
2243 * @vsi_port_num: VSI number or port number
2244 * @type: Type of switch element (port or VSI)
2245 * @swid: switch ID of the switch the element is attached to
2246 * @pf_vf_num: PF or VF number
2247 * @is_vf: true if the element is a VF, false otherwise
2250 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2251 u16 swid, u16 pf_vf_num, bool is_vf)
2254 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
/* physical port: record logical port number and ownership info */
2255 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2257 pi->pf_vf_num = pf_vf_num;
/* unrecognized element types are logged and ignored */
2261 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2266 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2267 * @hw: pointer to the hardware structure
2269 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2271 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2277 rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
2283 /* Multiple calls to ice_aq_get_sw_cfg may be required
2284 * to get all the switch configuration information. The need
2285 * for additional calls is indicated by ice_aq_get_sw_cfg
2286 * writing a non-zero value in req_desc
2289 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2291 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2292 &req_desc, &num_elems, NULL);
2297 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2298 u16 pf_vf_num, swid, vsi_port_num;
/* unpack the packed response fields via their masks/shifts */
2302 vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2303 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2305 pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2306 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2308 swid = le16_to_cpu(ele->swid);
2310 if (le16_to_cpu(ele->pf_vf_num) &
2311 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2314 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2315 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2317 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2318 /* FW VSI is not needed. Just continue. */
2322 ice_init_port_info(hw->port_info, vsi_port_num,
2323 res_type, swid, pf_vf_num, is_vf);
/* loop until FW signals completion (req_desc == 0) or an error occurs */
2325 } while (req_desc && !status);
2327 devm_kfree(ice_hw_to_dev(hw), rbuf);
2332 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2333 * @hw: pointer to the hardware structure
2334 * @fi: filter info structure to fill/update
2336 * This helper function populates the lb_en and lan_en elements of the provided
2337 * ice_fltr_info struct using the switch's type and characteristics of the
2338 * switch rule being configured.
2340 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* only TX-direction forwarding actions need the loopback/LAN-enable logic */
2344 if ((fi->flag & ICE_FLTR_TX) &&
2345 (fi->fltr_act == ICE_FWD_TO_VSI ||
2346 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2347 fi->fltr_act == ICE_FWD_TO_Q ||
2348 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2349 /* Setting LB for prune actions will result in replicated
2350 * packets to the internal switch that will be dropped.
2352 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2355 /* Set lan_en to TRUE if
2356 * 1. The switch is a VEB AND
2358 * 2.1 The lookup is a directional lookup like ethertype,
2359 * promiscuous, ethertype-MAC, promiscuous-VLAN
2360 * and default-port OR
2361 * 2.2 The lookup is VLAN, OR
2362 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2363 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2367 * The switch is a VEPA.
2369 * In all other cases, the LAN enable has to be set to false.
2372 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2373 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2374 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2375 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2376 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2377 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2378 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2379 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2380 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2381 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2390 * ice_fill_sw_rule - Helper function to fill switch rule structure
2391 * @hw: pointer to the hardware structure
2392 * @f_info: entry containing packet forwarding information
2393 * @s_rule: switch rule structure to be filled in based on mac_entry
2394 * @opc: switch rules population command type - pass in the command opcode
2397 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2398 struct ice_sw_rule_lkup_rx_tx *s_rule,
2399 enum ice_adminq_opc opc)
/* Sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN ID to program" */
2401 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2402 u16 vlan_tpid = ETH_P_8021Q;
/* Remove only needs the rule index; no header or action data required */
2410 if (opc == ice_aqc_opc_remove_sw_rules) {
2412 s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2413 s_rule->hdr_len = 0;
2417 eth_hdr_sz = sizeof(dummy_eth_header);
2418 eth_hdr = s_rule->hdr_data;
2420 /* initialize the ether header with a dummy header */
2421 memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
2422 ice_fill_sw_info(hw, f_info);
/* Build the single-action word from the requested forwarding action */
2424 switch (f_info->fltr_act) {
2425 case ICE_FWD_TO_VSI:
2426 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2427 ICE_SINGLE_ACT_VSI_ID_M;
/* VLAN rules prune rather than forward, so skip the forwarding bits */
2428 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2429 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2430 ICE_SINGLE_ACT_VALID_BIT;
2432 case ICE_FWD_TO_VSI_LIST:
2433 act |= ICE_SINGLE_ACT_VSI_LIST;
2434 act |= (f_info->fwd_id.vsi_list_id <<
2435 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2436 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2437 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2438 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2439 ICE_SINGLE_ACT_VALID_BIT;
2442 act |= ICE_SINGLE_ACT_TO_Q;
2443 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2444 ICE_SINGLE_ACT_Q_INDEX_M;
2446 case ICE_DROP_PACKET:
2447 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2448 ICE_SINGLE_ACT_VALID_BIT;
2450 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as a power of two in the action word */
2451 q_rgn = f_info->qgrp_size > 0 ?
2452 (u8)ilog2(f_info->qgrp_size) : 0;
2453 act |= ICE_SINGLE_ACT_TO_Q;
2454 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2455 ICE_SINGLE_ACT_Q_INDEX_M;
2456 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2457 ICE_SINGLE_ACT_Q_REGION_M;
2464 act |= ICE_SINGLE_ACT_LB_ENABLE;
2466 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pull lookup-specific match data (DA MAC, VLAN ID, TPID, ethertype) */
2468 switch (f_info->lkup_type) {
2469 case ICE_SW_LKUP_MAC:
2470 daddr = f_info->l_data.mac.mac_addr;
2472 case ICE_SW_LKUP_VLAN:
2473 vlan_id = f_info->l_data.vlan.vlan_id;
2474 if (f_info->l_data.vlan.tpid_valid)
2475 vlan_tpid = f_info->l_data.vlan.tpid;
/* VSI-destined VLAN rules act as egress/ingress pruning entries */
2476 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2477 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2478 act |= ICE_SINGLE_ACT_PRUNE;
2479 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2482 case ICE_SW_LKUP_ETHERTYPE_MAC:
2483 daddr = f_info->l_data.ethertype_mac.mac_addr;
2485 case ICE_SW_LKUP_ETHERTYPE:
/* Ethertype filters program bytes 12-13 of the dummy header directly */
2486 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2487 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
2489 case ICE_SW_LKUP_MAC_VLAN:
2490 daddr = f_info->l_data.mac_vlan.mac_addr;
2491 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2493 case ICE_SW_LKUP_PROMISC_VLAN:
2494 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2496 case ICE_SW_LKUP_PROMISC:
2497 daddr = f_info->l_data.mac_vlan.mac_addr;
2503 s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
2504 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2505 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
2507 /* Recipe set depending on lookup type */
2508 s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
2509 s_rule->src = cpu_to_le16(f_info->src);
2510 s_rule->act = cpu_to_le32(act);
2513 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
/* Program TCI and TPID only if a valid (<= 12-bit) VLAN ID was set */
2515 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2516 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2517 *off = cpu_to_be16(vlan_id);
2518 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2519 *off = cpu_to_be16(vlan_tpid);
2522 /* Create the switch rule with the final dummy Ethernet header */
2523 if (opc != ice_aqc_opc_update_sw_rules)
2524 s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
2528 * ice_add_marker_act
2529 * @hw: pointer to the hardware structure
2530 * @m_ent: the management entry for which sw marker needs to be added
2531 * @sw_marker: sw marker to tag the Rx descriptor with
2532 * @l_id: large action resource ID
2534 * Create a large action to hold software marker and update the switch rule
2535 * entry pointed by m_ent with newly created large action
2538 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2539 u16 sw_marker, u16 l_id)
2541 struct ice_sw_rule_lkup_rx_tx *rx_tx;
2542 struct ice_sw_rule_lg_act *lg_act;
2543 /* For software marker we need 3 large actions
2544 * 1. FWD action: FWD TO VSI or VSI LIST
2545 * 2. GENERIC VALUE action to hold the profile ID
2546 * 3. GENERIC VALUE action to hold the software marker ID
2548 const u16 num_lg_acts = 3;
/* Software markers are only supported on plain MAC lookups */
2555 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2558 /* Create two back-to-back switch rules and submit them to the HW using
2559 * one memory buffer:
2563 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
2564 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
2565 lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
/* The lookup rule sits immediately after the large action in the buffer */
2569 rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
2571 /* Fill in the first switch rule i.e. large action */
2572 lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
2573 lg_act->index = cpu_to_le16(l_id);
2574 lg_act->size = cpu_to_le16(num_lg_acts);
2576 /* First action VSI forwarding or VSI list forwarding depending on how
2579 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2580 m_ent->fltr_info.fwd_id.hw_vsi_id;
2582 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2583 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
2584 if (m_ent->vsi_count > 1)
2585 act |= ICE_LG_ACT_VSI_LIST;
2586 lg_act->act[0] = cpu_to_le32(act);
2588 /* Second action descriptor type */
2589 act = ICE_LG_ACT_GENERIC;
2591 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2592 lg_act->act[1] = cpu_to_le32(act);
2594 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2595 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2597 /* Third action Marker value */
2598 act |= ICE_LG_ACT_GENERIC;
2599 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2600 ICE_LG_ACT_GENERIC_VALUE_M;
2602 lg_act->act[2] = cpu_to_le32(act);
2604 /* call the fill switch rule to fill the lookup Tx Rx structure */
2605 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2606 ice_aqc_opc_update_sw_rules);
2608 /* Update the action to point to the large action ID */
2609 rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
2610 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2611 ICE_SINGLE_ACT_PTR_VAL_M));
2613 /* Use the filter rule ID of the previously created rule with single
2614 * act. Once the update happens, hardware will treat this as large
2617 rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call */
2619 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2620 ice_aqc_opc_update_sw_rules, NULL);
2622 m_ent->lg_act_idx = l_id;
2623 m_ent->sw_marker_id = sw_marker;
2626 devm_kfree(ice_hw_to_dev(hw), lg_act);
2631 * ice_create_vsi_list_map
2632 * @hw: pointer to the hardware structure
2633 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2634 * @num_vsi: number of VSI handles in the array
2635 * @vsi_list_id: VSI list ID generated as part of allocate resource
2637 * Helper function to create a new entry of VSI list ID to VSI mapping
2638 * using the given VSI list ID
2640 static struct ice_vsi_list_map_info *
2641 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2644 struct ice_switch_info *sw = hw->switch_info;
2645 struct ice_vsi_list_map_info *v_map;
2648 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2652 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI in the map's bitmap of handles */
2654 for (i = 0; i < num_vsi; i++)
2655 set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide list of VSI list mappings */
2657 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2662 * ice_update_vsi_list_rule
2663 * @hw: pointer to the hardware structure
2664 * @vsi_handle_arr: array of VSI handles to form a VSI list
2665 * @num_vsi: number of VSI handles in the array
2666 * @vsi_list_id: VSI list ID generated as part of allocate resource
2667 * @remove: Boolean value to indicate if this is a remove action
2668 * @opc: switch rules population command type - pass in the command opcode
2669 * @lkup_type: lookup type of the filter
2671 * Call AQ command to add a new switch rule or update existing switch rule
2672 * using the given VSI list ID
2675 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2676 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2677 enum ice_sw_lkup_type lkup_type)
2679 struct ice_sw_rule_vsi_list *s_rule;
/* Pick the AQ rule type: VLAN lookups use prune lists, everything else
 * uses plain VSI list set/clear.
 */
2688 if (lkup_type == ICE_SW_LKUP_MAC ||
2689 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2690 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2691 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2692 lkup_type == ICE_SW_LKUP_PROMISC ||
2693 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2694 lkup_type == ICE_SW_LKUP_DFLT)
2695 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2696 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2697 else if (lkup_type == ICE_SW_LKUP_VLAN)
2698 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2699 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2703 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2704 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
/* Validate every handle and translate it to the HW VSI number */
2707 for (i = 0; i < num_vsi; i++) {
2708 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2712 /* AQ call requires hw_vsi_id(s) */
2714 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2717 s_rule->hdr.type = cpu_to_le16(rule_type);
2718 s_rule->number_vsi = cpu_to_le16(num_vsi);
2719 s_rule->index = cpu_to_le16(vsi_list_id);
2721 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2724 devm_kfree(ice_hw_to_dev(hw), s_rule);
2729 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2730 * @hw: pointer to the HW struct
2731 * @vsi_handle_arr: array of VSI handles to form a VSI list
2732 * @num_vsi: number of VSI handles in the array
2733 * @vsi_list_id: stores the ID of the VSI list to be created
2734 * @lkup_type: switch rule filter's lookup type
2737 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2738 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
/* Allocate the VSI list resource from firmware first; the new list ID
 * is returned through *vsi_list_id.
 */
2742 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2743 ice_aqc_opc_alloc_res);
2747 /* Update the newly created VSI list to include the specified VSIs */
2748 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2749 *vsi_list_id, false,
2750 ice_aqc_opc_add_sw_rules, lkup_type);
2754 * ice_create_pkt_fwd_rule
2755 * @hw: pointer to the hardware structure
2756 * @f_entry: entry containing packet forwarding information
2758 * Create switch rule with given filter information and add an entry
2759 * to the corresponding filter management list to track this switch rule
2763 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2764 struct ice_fltr_list_entry *f_entry)
2766 struct ice_fltr_mgmt_list_entry *fm_entry;
2767 struct ice_sw_rule_lkup_rx_tx *s_rule;
2768 enum ice_sw_lkup_type l_type;
2769 struct ice_sw_recipe *recp;
2772 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2773 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2777 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2781 goto ice_create_pkt_fwd_rule_exit;
2784 fm_entry->fltr_info = f_entry->fltr_info;
2786 /* Initialize all the fields for the management entry */
2787 fm_entry->vsi_count = 1;
2788 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2789 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2790 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2792 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2793 ice_aqc_opc_add_sw_rules);
2795 status = ice_aq_sw_rules(hw, s_rule,
2796 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2797 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure drop the tracking entry that was just allocated */
2799 devm_kfree(ice_hw_to_dev(hw), fm_entry);
2800 goto ice_create_pkt_fwd_rule_exit;
/* Firmware returns the assigned rule ID in s_rule->index; record it in
 * both the caller's entry and the tracking entry.
 */
2803 f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2804 fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2806 /* The book keeping entries will get removed when base driver
2807 * calls remove filter AQ command
2809 l_type = fm_entry->fltr_info.lkup_type;
2810 recp = &hw->switch_info->recp_list[l_type];
2811 list_add(&fm_entry->list_entry, &recp->filt_rules);
2813 ice_create_pkt_fwd_rule_exit:
2814 devm_kfree(ice_hw_to_dev(hw), s_rule);
2819 * ice_update_pkt_fwd_rule
2820 * @hw: pointer to the hardware structure
2821 * @f_info: filter information for switch rule
2823 * Call AQ command to update a previously created switch rule with a
2827 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2829 struct ice_sw_rule_lkup_rx_tx *s_rule;
2832 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2833 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2838 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its previously assigned rule ID */
2840 s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2842 /* Update switch rule with new rule set to forward VSI list */
2843 status = ice_aq_sw_rules(hw, s_rule,
2844 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2845 ice_aqc_opc_update_sw_rules, NULL);
2847 devm_kfree(ice_hw_to_dev(hw), s_rule);
2852 * ice_update_sw_rule_bridge_mode
2853 * @hw: pointer to the HW struct
2855 * Updates unicast switch filter rules based on VEB/VEPA mode
2857 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2859 struct ice_switch_info *sw = hw->switch_info;
2860 struct ice_fltr_mgmt_list_entry *fm_entry;
2861 struct list_head *rule_head;
2862 struct mutex *rule_lock; /* Lock to protect filter rule list */
2865 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2866 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2868 mutex_lock(rule_lock);
/* Walk every tracked MAC rule; only unicast Tx forwarding rules need
 * to be reprogrammed when the bridge mode changes.
 */
2869 list_for_each_entry(fm_entry, rule_head, list_entry) {
2870 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2871 u8 *addr = fi->l_data.mac.mac_addr;
2873 /* Update unicast Tx rules to reflect the selected
2876 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2877 (fi->fltr_act == ICE_FWD_TO_VSI ||
2878 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2879 fi->fltr_act == ICE_FWD_TO_Q ||
2880 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2881 status = ice_update_pkt_fwd_rule(hw, fi);
2887 mutex_unlock(rule_lock);
2893 * ice_add_update_vsi_list
2894 * @hw: pointer to the hardware structure
2895 * @m_entry: pointer to current filter management list entry
2896 * @cur_fltr: filter information from the book keeping entry
2897 * @new_fltr: filter information with the new VSI to be added
2899 * Call AQ command to add or update previously created VSI list with new VSI.
2901 * Helper function to do book keeping associated with adding filter information
2902 * The algorithm to do the book keeping is described below :
2903 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2904 * if only one VSI has been added till now
2905 * Allocate a new VSI list and add two VSIs
2906 * to this list using switch rule command
2907 * Update the previously created switch rule with the
2908 * newly created VSI list ID
2909 * if a VSI list was previously created
2910 * Add the new VSI to the previously created VSI list set
2911 * using the update switch rule command
2914 ice_add_update_vsi_list(struct ice_hw *hw,
2915 struct ice_fltr_mgmt_list_entry *m_entry,
2916 struct ice_fltr_info *cur_fltr,
2917 struct ice_fltr_info *new_fltr)
2919 u16 vsi_list_id = 0;
/* Queue-directed actions cannot share a rule via a VSI list */
2922 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2923 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
/* Mixing a queue action with an existing VSI action is not allowed */
2926 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2927 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2928 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2929 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2932 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2933 /* Only one entry existed in the mapping and it was not already
2934 * a part of a VSI list. So, create a VSI list with the old and
2937 struct ice_fltr_info tmp_fltr;
2938 u16 vsi_handle_arr[2];
2940 /* A rule already exists with the new VSI being added */
2941 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2944 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2945 vsi_handle_arr[1] = new_fltr->vsi_handle;
2946 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2948 new_fltr->lkup_type);
2952 tmp_fltr = *new_fltr;
2953 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2954 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2955 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2956 /* Update the previous switch rule of "MAC forward to VSI" to
2957 * "MAC fwd to VSI list"
2959 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping entry in sync with the new rule form */
2963 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2964 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2965 m_entry->vsi_list_info =
2966 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2969 if (!m_entry->vsi_list_info)
2972 /* If this entry was large action then the large action needs
2973 * to be updated to point to FWD to VSI list
2975 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2977 ice_add_marker_act(hw, m_entry,
2978 m_entry->sw_marker_id,
2979 m_entry->lg_act_idx);
2981 u16 vsi_handle = new_fltr->vsi_handle;
2982 enum ice_adminq_opc opcode;
2984 if (!m_entry->vsi_list_info)
2987 /* A rule already exists with the new VSI being added */
2988 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
2991 /* Update the previously created VSI list set with
2992 * the new VSI ID passed in
2994 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2995 opcode = ice_aqc_opc_update_sw_rules;
2997 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2998 vsi_list_id, false, opcode,
2999 new_fltr->lkup_type);
3000 /* update VSI list mapping info with new VSI ID */
3002 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)
3005 m_entry->vsi_count++;
3010 * ice_find_rule_entry - Search a rule entry
3011 * @hw: pointer to the hardware structure
3012 * @recp_id: lookup type for which the specified rule needs to be searched
3013 * @f_info: rule information
3015 * Helper function to search for a given rule entry
3016 * Returns pointer to entry storing the rule if found
3018 static struct ice_fltr_mgmt_list_entry *
3019 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3021 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3022 struct ice_switch_info *sw = hw->switch_info;
3023 struct list_head *list_head;
3025 list_head = &sw->recp_list[recp_id].filt_rules;
/* Match on both the lookup data (full l_data union compare) and the
 * Rx/Tx direction flag; NOTE(review): caller appears responsible for
 * holding the recipe's filt_rule_lock - confirm at call sites.
 */
3026 list_for_each_entry(list_itr, list_head, list_entry) {
3027 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3028 sizeof(f_info->l_data)) &&
3029 f_info->flag == list_itr->fltr_info.flag) {
3038 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3039 * @hw: pointer to the hardware structure
3040 * @recp_id: lookup type for which VSI lists needs to be searched
3041 * @vsi_handle: VSI handle to be found in VSI list
3042 * @vsi_list_id: VSI list ID found containing vsi_handle
3044 * Helper function to search a VSI list with single entry containing given VSI
3045 * handle element. This can be extended further to search VSI list with more
3046 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3048 static struct ice_vsi_list_map_info *
3049 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3052 struct ice_vsi_list_map_info *map_info = NULL;
3053 struct ice_switch_info *sw = hw->switch_info;
3054 struct ice_fltr_mgmt_list_entry *list_itr;
3055 struct list_head *list_head;
3057 list_head = &sw->recp_list[recp_id].filt_rules;
/* Only single-VSI lists are candidates for reuse (see function doc) */
3058 list_for_each_entry(list_itr, list_head, list_entry) {
3059 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3060 map_info = list_itr->vsi_list_info;
3061 if (test_bit(vsi_handle, map_info->vsi_map)) {
3062 *vsi_list_id = map_info->vsi_list_id;
3071 * ice_add_rule_internal - add rule for a given lookup type
3072 * @hw: pointer to the hardware structure
3073 * @recp_id: lookup type (recipe ID) for which rule has to be added
3074 * @f_entry: structure containing MAC forwarding information
3076 * Adds or updates the rule lists for a given recipe
3079 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3080 struct ice_fltr_list_entry *f_entry)
3082 struct ice_switch_info *sw = hw->switch_info;
3083 struct ice_fltr_info *new_fltr, *cur_fltr;
3084 struct ice_fltr_mgmt_list_entry *m_entry;
3085 struct mutex *rule_lock; /* Lock to protect filter rule list */
3088 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
/* Translate the software VSI handle into the HW VSI number the AQ uses */
3090 f_entry->fltr_info.fwd_id.hw_vsi_id =
3091 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3093 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3095 mutex_lock(rule_lock);
3096 new_fltr = &f_entry->fltr_info;
/* Source is the port for Rx rules, the originating VSI for Tx rules */
3097 if (new_fltr->flag & ICE_FLTR_RX)
3098 new_fltr->src = hw->port_info->lport;
3099 else if (new_fltr->flag & ICE_FLTR_TX)
3100 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3102 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
/* No matching rule tracked yet: create a fresh forwarding rule */
3104 mutex_unlock(rule_lock);
3105 return ice_create_pkt_fwd_rule(hw, f_entry);
/* Rule exists: fold the new VSI into its (possibly new) VSI list */
3108 cur_fltr = &m_entry->fltr_info;
3109 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3110 mutex_unlock(rule_lock);
3116 * ice_remove_vsi_list_rule
3117 * @hw: pointer to the hardware structure
3118 * @vsi_list_id: VSI list ID generated as part of allocate resource
3119 * @lkup_type: switch rule filter lookup type
3121 * The VSI list should be emptied before this function is called to remove the
3125 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3126 enum ice_sw_lkup_type lkup_type)
3128 struct ice_sw_rule_vsi_list *s_rule;
/* Zero-VSI rule size: only the header/index is sent for a clear */
3132 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3133 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3137 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3138 s_rule->index = cpu_to_le16(vsi_list_id);
3140 /* Free the vsi_list resource that we allocated. It is assumed that the
3141 * list is empty at this point.
3143 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3144 ice_aqc_opc_free_res);
3146 devm_kfree(ice_hw_to_dev(hw), s_rule);
3151 * ice_rem_update_vsi_list
3152 * @hw: pointer to the hardware structure
3153 * @vsi_handle: VSI handle of the VSI to remove
3154 * @fm_list: filter management entry for which the VSI list management needs to
3158 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3159 struct ice_fltr_mgmt_list_entry *fm_list)
3161 enum ice_sw_lkup_type lkup_type;
/* Only applicable to rules that actually forward to a VSI list */
3165 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3166 fm_list->vsi_count == 0)
3169 /* A rule with the VSI being removed does not exist */
3170 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3173 lkup_type = fm_list->fltr_info.lkup_type;
3174 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3175 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3176 ice_aqc_opc_update_sw_rules,
3181 fm_list->vsi_count--;
3182 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* One VSI left for a non-VLAN rule: collapse the rule back from
 * "forward to VSI list" to a direct "forward to VSI".
 */
3184 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3185 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3186 struct ice_vsi_list_map_info *vsi_list_info =
3187 fm_list->vsi_list_info;
3190 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3192 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3195 /* Make sure VSI list is empty before removing it below */
3196 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3198 ice_aqc_opc_update_sw_rules,
3203 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3204 tmp_fltr_info.fwd_id.hw_vsi_id =
3205 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3206 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3207 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3209 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3210 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3214 fm_list->fltr_info = tmp_fltr_info;
/* VSI list no longer needed: one member (non-VLAN) or empty (VLAN) */
3217 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3218 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3219 struct ice_vsi_list_map_info *vsi_list_info =
3220 fm_list->vsi_list_info;
3222 /* Remove the VSI list since it is no longer used */
3223 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3225 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3226 vsi_list_id, status);
3230 list_del(&vsi_list_info->list_entry);
3231 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3232 fm_list->vsi_list_info = NULL;
3239 * ice_remove_rule_internal - Remove a filter rule of a given type
3240 * @hw: pointer to the hardware structure
3241 * @recp_id: recipe ID for which the rule needs to removed
3242 * @f_entry: rule entry containing filter information
3245 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3246 struct ice_fltr_list_entry *f_entry)
3248 struct ice_switch_info *sw = hw->switch_info;
3249 struct ice_fltr_mgmt_list_entry *list_elem;
3250 struct mutex *rule_lock; /* Lock to protect filter rule list */
3251 bool remove_rule = false;
3255 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3257 f_entry->fltr_info.fwd_id.hw_vsi_id =
3258 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3260 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3261 mutex_lock(rule_lock);
3262 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
/* Decide whether the HW rule itself must be deleted, or only the VSI
 * list membership updated.
 */
3268 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3270 } else if (!list_elem->vsi_list_info) {
3273 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3274 /* a ref_cnt > 1 indicates that the vsi_list is being
3275 * shared by multiple rules. Decrement the ref_cnt and
3276 * remove this rule, but do not modify the list, as it
3277 * is in-use by other rules.
3279 list_elem->vsi_list_info->ref_cnt--;
3282 /* a ref_cnt of 1 indicates the vsi_list is only used
3283 * by one rule. However, the original removal request is only
3284 * for a single VSI. Update the vsi_list first, and only
3285 * remove the rule if there are no further VSIs in this list.
3287 vsi_handle = f_entry->fltr_info.vsi_handle;
3288 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3291 /* if VSI count goes to zero after updating the VSI list */
3292 if (list_elem->vsi_count == 0)
3297 /* Remove the lookup rule */
3298 struct ice_sw_rule_lkup_rx_tx *s_rule;
/* Removal carries no packet header, hence the NO_HDR sized buffer */
3300 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3301 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3308 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3309 ice_aqc_opc_remove_sw_rules);
3311 status = ice_aq_sw_rules(hw, s_rule,
3312 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3313 1, ice_aqc_opc_remove_sw_rules, NULL);
3315 /* Remove a book keeping from the list */
3316 devm_kfree(ice_hw_to_dev(hw), s_rule);
3321 list_del(&list_elem->list_entry);
3322 devm_kfree(ice_hw_to_dev(hw), list_elem);
3325 mutex_unlock(rule_lock);
3330 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3331 * @hw: pointer to the hardware structure
3332 * @mac: MAC address to be checked (for MAC filter)
3333 * @vsi_handle: check MAC filter for this VSI
3335 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3337 struct ice_fltr_mgmt_list_entry *entry;
3338 struct list_head *rule_head;
3339 struct ice_switch_info *sw;
3340 struct mutex *rule_lock; /* Lock to protect filter rule list */
3343 if (!ice_is_vsi_valid(hw, vsi_handle))
3346 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3347 sw = hw->switch_info;
3348 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3352 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3353 mutex_lock(rule_lock);
3354 list_for_each_entry(entry, rule_head, list_entry) {
3355 struct ice_fltr_info *f_info = &entry->fltr_info;
3356 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3358 if (is_zero_ether_addr(mac_addr))
/* Only consider VSI-sourced Tx MAC rules that forward to this VSI */
3361 if (f_info->flag != ICE_FLTR_TX ||
3362 f_info->src_id != ICE_SRC_ID_VSI ||
3363 f_info->lkup_type != ICE_SW_LKUP_MAC ||
3364 f_info->fltr_act != ICE_FWD_TO_VSI ||
3365 hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3368 if (ether_addr_equal(mac, mac_addr)) {
3369 mutex_unlock(rule_lock);
3373 mutex_unlock(rule_lock);
3378 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3379 * @hw: pointer to the hardware structure
3381 * @vsi_handle: check MAC filter for this VSI
3383 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3385 struct ice_fltr_mgmt_list_entry *entry;
3386 struct list_head *rule_head;
3387 struct ice_switch_info *sw;
3388 struct mutex *rule_lock; /* Lock to protect filter rule list */
/* VLAN IDs are 12 bits; reject anything wider up front */
3391 if (vlan_id > ICE_MAX_VLAN_ID)
3394 if (!ice_is_vsi_valid(hw, vsi_handle))
3397 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3398 sw = hw->switch_info;
3399 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3403 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3404 mutex_lock(rule_lock);
3405 list_for_each_entry(entry, rule_head, list_entry) {
3406 struct ice_fltr_info *f_info = &entry->fltr_info;
3407 u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3408 struct ice_vsi_list_map_info *map_info;
3410 if (entry_vlan_id > ICE_MAX_VLAN_ID)
/* Only VSI-sourced Tx VLAN rules are relevant here */
3413 if (f_info->flag != ICE_FLTR_TX ||
3414 f_info->src_id != ICE_SRC_ID_VSI ||
3415 f_info->lkup_type != ICE_SW_LKUP_VLAN)
3418 /* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3419 if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3420 f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3423 if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3424 if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3426 } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3427 /* If filter_action is FWD_TO_VSI_LIST, make sure
3428 * that VSI being checked is part of VSI list
3430 if (entry->vsi_count == 1 &&
3431 entry->vsi_list_info) {
3432 map_info = entry->vsi_list_info;
3433 if (!test_bit(vsi_handle, map_info->vsi_map))
3438 if (vlan_id == entry_vlan_id) {
3439 mutex_unlock(rule_lock);
3443 mutex_unlock(rule_lock);
3449 * ice_add_mac - Add a MAC address based filter rule
3450 * @hw: pointer to the hardware structure
3451 * @m_list: list of MAC addresses and forwarding information
3453 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3455 struct ice_fltr_list_entry *m_list_itr;
/* Validate and program each requested MAC filter; each entry's own
 * ->status records its individual outcome.
 */
3461 list_for_each_entry(m_list_itr, m_list, list_entry) {
3462 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3466 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3467 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3468 if (!ice_is_vsi_valid(hw, vsi_handle))
3470 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3471 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3472 /* update the src in case it is VSI num */
3473 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3475 m_list_itr->fltr_info.src = hw_vsi_id;
/* Reject non-MAC lookups and all-zero addresses */
3476 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3477 is_zero_ether_addr(add))
3480 m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
/* Stop at the first entry that fails to program */
3482 if (m_list_itr->status)
3483 return m_list_itr->status;
3490 * ice_add_vlan_internal - Add one VLAN based filter rule
3491 * @hw: pointer to the hardware structure
3492 * @f_entry: filter entry containing one VLAN information
3495 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3497 struct ice_switch_info *sw = hw->switch_info;
3498 struct ice_fltr_mgmt_list_entry *v_list_itr;
3499 struct ice_fltr_info *new_fltr, *cur_fltr;
3500 enum ice_sw_lkup_type lkup_type;
3501 u16 vsi_list_id = 0, vsi_handle;
3502 struct mutex *rule_lock; /* Lock to protect filter rule list */
3505 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3508 f_entry->fltr_info.fwd_id.hw_vsi_id =
3509 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3510 new_fltr = &f_entry->fltr_info;
3512 /* VLAN ID should only be 12 bits */
3513 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3516 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3519 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3520 lkup_type = new_fltr->lkup_type;
3521 vsi_handle = new_fltr->vsi_handle;
3522 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3523 mutex_lock(rule_lock);
3524 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* No rule exists yet for this VLAN ID */
3526 struct ice_vsi_list_map_info *map_info = NULL;
3528 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3529 /* All VLAN pruning rules use a VSI list. Check if
3530 * there is already a VSI list containing VSI that we
3531 * want to add. If found, use the same vsi_list_id for
3532 * this new VLAN rule or else create a new list.
3534 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3538 status = ice_create_vsi_list_rule(hw,
3546 /* Convert the action to forwarding to a VSI list. */
3547 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3548 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3551 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-find the entry that ice_create_pkt_fwd_rule just tracked so
 * its VSI list map can be attached below.
 */
3553 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3559 /* reuse VSI list for new rule and increment ref_cnt */
3561 v_list_itr->vsi_list_info = map_info;
3562 map_info->ref_cnt++;
3564 v_list_itr->vsi_list_info =
3565 ice_create_vsi_list_map(hw, &vsi_handle,
3569 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3570 /* Update existing VSI list to add new VSI ID only if it used
3573 cur_fltr = &v_list_itr->fltr_info;
3574 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3577 /* If VLAN rule exists and VSI list being used by this rule is
3578 * referenced by more than 1 VLAN rule. Then create a new VSI
3579 * list appending previous VSI with new VSI and update existing
3580 * VLAN rule to point to new VSI list ID
3582 struct ice_fltr_info tmp_fltr;
3583 u16 vsi_handle_arr[2];
3586 /* Current implementation only supports reusing VSI list with
3587 * one VSI count. We should never hit below condition
3589 if (v_list_itr->vsi_count > 1 &&
3590 v_list_itr->vsi_list_info->ref_cnt > 1) {
3591 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3597 find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3600 /* A rule already exists with the new VSI being added */
3601 if (cur_handle == vsi_handle) {
3606 vsi_handle_arr[0] = cur_handle;
3607 vsi_handle_arr[1] = vsi_handle;
3608 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3609 &vsi_list_id, lkup_type);
3613 tmp_fltr = v_list_itr->fltr_info;
3614 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3615 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3616 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3617 /* Update the previous switch rule to a new VSI list which
3618 * includes current VSI that is requested
3620 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3624 /* before overriding VSI list map info. decrement ref_cnt of
3627 v_list_itr->vsi_list_info->ref_cnt--;
3629 /* now update to newly created list */
3630 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3631 v_list_itr->vsi_list_info =
3632 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3634 v_list_itr->vsi_count++;
3638 mutex_unlock(rule_lock);
3643 * ice_add_vlan - Add VLAN based filter rule
3644 * @hw: pointer to the hardware structure
3645 * @v_list: list of VLAN entries and forwarding information
3647 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3649 struct ice_fltr_list_entry *v_list_itr;
3654 list_for_each_entry(v_list_itr, v_list, list_entry) {
3655 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3657 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3658 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3659 if (v_list_itr->status)
3660 return v_list_itr->status;
3666 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3667 * @hw: pointer to the hardware structure
3668 * @em_list: list of ether type MAC filter, MAC is optional
3670 * This function requires the caller to populate the entries in
3671 * the filter list with the necessary fields (including flags to
3672 * indicate Tx or Rx rules).
3674 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3676 struct ice_fltr_list_entry *em_list_itr;
3678 if (!em_list || !hw)
3681 list_for_each_entry(em_list_itr, em_list, list_entry) {
3682 enum ice_sw_lkup_type l_type =
3683 em_list_itr->fltr_info.lkup_type;
3685 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3686 l_type != ICE_SW_LKUP_ETHERTYPE)
3689 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3691 if (em_list_itr->status)
3692 return em_list_itr->status;
3698 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3699 * @hw: pointer to the hardware structure
3700 * @em_list: list of ethertype or ethertype MAC entries
3702 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3704 struct ice_fltr_list_entry *em_list_itr, *tmp;
3706 if (!em_list || !hw)
3709 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3710 enum ice_sw_lkup_type l_type =
3711 em_list_itr->fltr_info.lkup_type;
3713 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3714 l_type != ICE_SW_LKUP_ETHERTYPE)
3717 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3719 if (em_list_itr->status)
3720 return em_list_itr->status;
3726 * ice_rem_sw_rule_info
3727 * @hw: pointer to the hardware structure
3728 * @rule_head: pointer to the switch list structure that we want to delete
3731 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3733 if (!list_empty(rule_head)) {
3734 struct ice_fltr_mgmt_list_entry *entry;
3735 struct ice_fltr_mgmt_list_entry *tmp;
3737 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3738 list_del(&entry->list_entry);
3739 devm_kfree(ice_hw_to_dev(hw), entry);
3745 * ice_rem_adv_rule_info
3746 * @hw: pointer to the hardware structure
3747 * @rule_head: pointer to the switch list structure that we want to delete
3750 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3752 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3753 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3755 if (list_empty(rule_head))
3758 list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3759 list_del(&lst_itr->list_entry);
3760 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3761 devm_kfree(ice_hw_to_dev(hw), lst_itr);
3766 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3767 * @pi: pointer to the port_info structure
3768 * @vsi_handle: VSI handle to set as default
3769 * @set: true to add the above mentioned switch rule, false to remove it
3770 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3772 * add filter rule to set/unset given VSI as default VSI for the switch
3773 * (represented by swid)
3776 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3779 struct ice_fltr_list_entry f_list_entry;
3780 struct ice_fltr_info f_info;
3781 struct ice_hw *hw = pi->hw;
3785 if (!ice_is_vsi_valid(hw, vsi_handle))
3788 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3790 memset(&f_info, 0, sizeof(f_info));
3792 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3793 f_info.flag = direction;
3794 f_info.fltr_act = ICE_FWD_TO_VSI;
3795 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3796 f_info.vsi_handle = vsi_handle;
3798 if (f_info.flag & ICE_FLTR_RX) {
3799 f_info.src = hw->port_info->lport;
3800 f_info.src_id = ICE_SRC_ID_LPORT;
3801 } else if (f_info.flag & ICE_FLTR_TX) {
3802 f_info.src_id = ICE_SRC_ID_VSI;
3803 f_info.src = hw_vsi_id;
3805 f_list_entry.fltr_info = f_info;
3808 status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3811 status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3818 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3819 * @fm_entry: filter entry to inspect
3820 * @vsi_handle: VSI handle to compare with filter info
3823 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3825 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3826 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3827 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3828 fm_entry->vsi_list_info &&
3829 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3833 * ice_check_if_dflt_vsi - check if VSI is default VSI
3834 * @pi: pointer to the port_info structure
3835 * @vsi_handle: vsi handle to check for in filter list
3836 * @rule_exists: indicates if there are any VSI's in the rule list
3838 * checks if the VSI is in a default VSI list, and also indicates
3839 * if the default VSI list is empty
3842 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3845 struct ice_fltr_mgmt_list_entry *fm_entry;
3846 struct ice_sw_recipe *recp_list;
3847 struct list_head *rule_head;
3848 struct mutex *rule_lock; /* Lock to protect filter rule list */
3851 recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3852 rule_lock = &recp_list->filt_rule_lock;
3853 rule_head = &recp_list->filt_rules;
3855 mutex_lock(rule_lock);
3857 if (rule_exists && !list_empty(rule_head))
3858 *rule_exists = true;
3860 list_for_each_entry(fm_entry, rule_head, list_entry) {
3861 if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3867 mutex_unlock(rule_lock);
3873 * ice_remove_mac - remove a MAC address based filter rule
3874 * @hw: pointer to the hardware structure
3875 * @m_list: list of MAC addresses and forwarding information
3877 * This function removes either a MAC filter rule or a specific VSI from a
3878 * VSI list for a multicast MAC address.
3880 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3881 * be aware that this call will only work if all the entries passed into m_list
3882 * were added previously. It will not attempt to do a partial remove of entries
3885 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3887 struct ice_fltr_list_entry *list_itr, *tmp;
3892 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3893 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3896 if (l_type != ICE_SW_LKUP_MAC)
3899 vsi_handle = list_itr->fltr_info.vsi_handle;
3900 if (!ice_is_vsi_valid(hw, vsi_handle))
3903 list_itr->fltr_info.fwd_id.hw_vsi_id =
3904 ice_get_hw_vsi_num(hw, vsi_handle);
3906 list_itr->status = ice_remove_rule_internal(hw,
3909 if (list_itr->status)
3910 return list_itr->status;
3916 * ice_remove_vlan - Remove VLAN based filter rule
3917 * @hw: pointer to the hardware structure
3918 * @v_list: list of VLAN entries and forwarding information
3920 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
3922 struct ice_fltr_list_entry *v_list_itr, *tmp;
3927 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
3928 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3930 if (l_type != ICE_SW_LKUP_VLAN)
3932 v_list_itr->status = ice_remove_rule_internal(hw,
3935 if (v_list_itr->status)
3936 return v_list_itr->status;
3942 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3943 * @hw: pointer to the hardware structure
3944 * @vsi_handle: VSI handle to remove filters from
3945 * @vsi_list_head: pointer to the list to add entry to
3946 * @fi: pointer to fltr_info of filter entry to copy & add
3948 * Helper function, used when creating a list of filters to remove from
3949 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3950 * original filter entry, with the exception of fltr_info.fltr_act and
3951 * fltr_info.fwd_id fields. These are set such that later logic can
3952 * extract which VSI to remove the fltr from, and pass on that information.
3955 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3956 struct list_head *vsi_list_head,
3957 struct ice_fltr_info *fi)
3959 struct ice_fltr_list_entry *tmp;
3961 /* this memory is freed up in the caller function
3962 * once filters for this VSI are removed
3964 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
3968 tmp->fltr_info = *fi;
3970 /* Overwrite these fields to indicate which VSI to remove filter from,
3971 * so find and remove logic can extract the information from the
3972 * list entries. Note that original entries will still have proper
3975 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3976 tmp->fltr_info.vsi_handle = vsi_handle;
3977 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3979 list_add(&tmp->list_entry, vsi_list_head);
3985 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3986 * @hw: pointer to the hardware structure
3987 * @vsi_handle: VSI handle to remove filters from
3988 * @lkup_list_head: pointer to the list that has certain lookup type filters
3989 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3991 * Locates all filters in lkup_list_head that are used by the given VSI,
3992 * and adds COPIES of those entries to vsi_list_head (intended to be used
3993 * to remove the listed filters).
3994 * Note that this means all entries in vsi_list_head must be explicitly
3995 * deallocated by the caller when done with list.
3998 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3999 struct list_head *lkup_list_head,
4000 struct list_head *vsi_list_head)
4002 struct ice_fltr_mgmt_list_entry *fm_entry;
4005 /* check to make sure VSI ID is valid and within boundary */
4006 if (!ice_is_vsi_valid(hw, vsi_handle))
4009 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4010 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4013 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4015 &fm_entry->fltr_info);
4023 * ice_determine_promisc_mask
4024 * @fi: filter info to parse
4026 * Helper function to determine which ICE_PROMISC_ mask corresponds
4027 * to given filter into.
4029 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4031 u16 vid = fi->l_data.mac_vlan.vlan_id;
4032 u8 *macaddr = fi->l_data.mac.mac_addr;
4033 bool is_tx_fltr = false;
4034 u8 promisc_mask = 0;
4036 if (fi->flag == ICE_FLTR_TX)
4039 if (is_broadcast_ether_addr(macaddr))
4040 promisc_mask |= is_tx_fltr ?
4041 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4042 else if (is_multicast_ether_addr(macaddr))
4043 promisc_mask |= is_tx_fltr ?
4044 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4045 else if (is_unicast_ether_addr(macaddr))
4046 promisc_mask |= is_tx_fltr ?
4047 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4049 promisc_mask |= is_tx_fltr ?
4050 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4052 return promisc_mask;
4056 * ice_remove_promisc - Remove promisc based filter rules
4057 * @hw: pointer to the hardware structure
4058 * @recp_id: recipe ID for which the rule needs to removed
4059 * @v_list: list of promisc entries
4062 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4064 struct ice_fltr_list_entry *v_list_itr, *tmp;
4066 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4067 v_list_itr->status =
4068 ice_remove_rule_internal(hw, recp_id, v_list_itr);
4069 if (v_list_itr->status)
4070 return v_list_itr->status;
4076 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4077 * @hw: pointer to the hardware structure
4078 * @vsi_handle: VSI handle to clear mode
4079 * @promisc_mask: mask of promiscuous config bits to clear
4080 * @vid: VLAN ID to clear VLAN promiscuous
4083 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4086 struct ice_switch_info *sw = hw->switch_info;
4087 struct ice_fltr_list_entry *fm_entry, *tmp;
4088 struct list_head remove_list_head;
4089 struct ice_fltr_mgmt_list_entry *itr;
4090 struct list_head *rule_head;
4091 struct mutex *rule_lock; /* Lock to protect filter rule list */
4095 if (!ice_is_vsi_valid(hw, vsi_handle))
4098 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4099 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4101 recipe_id = ICE_SW_LKUP_PROMISC;
4103 rule_head = &sw->recp_list[recipe_id].filt_rules;
4104 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4106 INIT_LIST_HEAD(&remove_list_head);
4108 mutex_lock(rule_lock);
4109 list_for_each_entry(itr, rule_head, list_entry) {
4110 struct ice_fltr_info *fltr_info;
4111 u8 fltr_promisc_mask = 0;
4113 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4115 fltr_info = &itr->fltr_info;
4117 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4118 vid != fltr_info->l_data.mac_vlan.vlan_id)
4121 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4123 /* Skip if filter is not completely specified by given mask */
4124 if (fltr_promisc_mask & ~promisc_mask)
4127 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4131 mutex_unlock(rule_lock);
4132 goto free_fltr_list;
4135 mutex_unlock(rule_lock);
4137 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4140 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4141 list_del(&fm_entry->list_entry);
4142 devm_kfree(ice_hw_to_dev(hw), fm_entry);
4149 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4150 * @hw: pointer to the hardware structure
4151 * @vsi_handle: VSI handle to configure
4152 * @promisc_mask: mask of promiscuous config bits
4153 * @vid: VLAN ID to set VLAN promiscuous
4156 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4158 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4159 struct ice_fltr_list_entry f_list_entry;
4160 struct ice_fltr_info new_fltr;
4167 if (!ice_is_vsi_valid(hw, vsi_handle))
4169 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4171 memset(&new_fltr, 0, sizeof(new_fltr));
4173 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4174 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4175 new_fltr.l_data.mac_vlan.vlan_id = vid;
4176 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4178 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4179 recipe_id = ICE_SW_LKUP_PROMISC;
4182 /* Separate filters must be set for each direction/packet type
4183 * combination, so we will loop over the mask value, store the
4184 * individual type, and clear it out in the input mask as it
4187 while (promisc_mask) {
4193 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4194 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4195 pkt_type = UCAST_FLTR;
4196 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4197 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4198 pkt_type = UCAST_FLTR;
4200 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4201 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4202 pkt_type = MCAST_FLTR;
4203 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4204 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4205 pkt_type = MCAST_FLTR;
4207 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4208 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4209 pkt_type = BCAST_FLTR;
4210 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4211 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4212 pkt_type = BCAST_FLTR;
4216 /* Check for VLAN promiscuous flag */
4217 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4218 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4219 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4220 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4224 /* Set filter DA based on packet type */
4225 mac_addr = new_fltr.l_data.mac.mac_addr;
4226 if (pkt_type == BCAST_FLTR) {
4227 eth_broadcast_addr(mac_addr);
4228 } else if (pkt_type == MCAST_FLTR ||
4229 pkt_type == UCAST_FLTR) {
4230 /* Use the dummy ether header DA */
4231 ether_addr_copy(mac_addr, dummy_eth_header);
4232 if (pkt_type == MCAST_FLTR)
4233 mac_addr[0] |= 0x1; /* Set multicast bit */
4236 /* Need to reset this to zero for all iterations */
4239 new_fltr.flag |= ICE_FLTR_TX;
4240 new_fltr.src = hw_vsi_id;
4242 new_fltr.flag |= ICE_FLTR_RX;
4243 new_fltr.src = hw->port_info->lport;
4246 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4247 new_fltr.vsi_handle = vsi_handle;
4248 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4249 f_list_entry.fltr_info = new_fltr;
4251 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4253 goto set_promisc_exit;
4261 * ice_set_vlan_vsi_promisc
4262 * @hw: pointer to the hardware structure
4263 * @vsi_handle: VSI handle to configure
4264 * @promisc_mask: mask of promiscuous config bits
4265 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4267 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4270 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4271 bool rm_vlan_promisc)
4273 struct ice_switch_info *sw = hw->switch_info;
4274 struct ice_fltr_list_entry *list_itr, *tmp;
4275 struct list_head vsi_list_head;
4276 struct list_head *vlan_head;
4277 struct mutex *vlan_lock; /* Lock to protect filter rule list */
4281 INIT_LIST_HEAD(&vsi_list_head);
4282 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4283 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4284 mutex_lock(vlan_lock);
4285 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4287 mutex_unlock(vlan_lock);
4289 goto free_fltr_list;
4291 list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4292 /* Avoid enabling or disabling VLAN zero twice when in double
4295 if (ice_is_dvm_ena(hw) &&
4296 list_itr->fltr_info.l_data.vlan.tpid == 0)
4299 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4300 if (rm_vlan_promisc)
4301 status = ice_clear_vsi_promisc(hw, vsi_handle,
4302 promisc_mask, vlan_id);
4304 status = ice_set_vsi_promisc(hw, vsi_handle,
4305 promisc_mask, vlan_id);
4306 if (status && status != -EEXIST)
4311 list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4312 list_del(&list_itr->list_entry);
4313 devm_kfree(ice_hw_to_dev(hw), list_itr);
4319 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4320 * @hw: pointer to the hardware structure
4321 * @vsi_handle: VSI handle to remove filters from
4322 * @lkup: switch rule filter lookup type
4325 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4326 enum ice_sw_lkup_type lkup)
4328 struct ice_switch_info *sw = hw->switch_info;
4329 struct ice_fltr_list_entry *fm_entry;
4330 struct list_head remove_list_head;
4331 struct list_head *rule_head;
4332 struct ice_fltr_list_entry *tmp;
4333 struct mutex *rule_lock; /* Lock to protect filter rule list */
4336 INIT_LIST_HEAD(&remove_list_head);
4337 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4338 rule_head = &sw->recp_list[lkup].filt_rules;
4339 mutex_lock(rule_lock);
4340 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4342 mutex_unlock(rule_lock);
4344 goto free_fltr_list;
4347 case ICE_SW_LKUP_MAC:
4348 ice_remove_mac(hw, &remove_list_head);
4350 case ICE_SW_LKUP_VLAN:
4351 ice_remove_vlan(hw, &remove_list_head);
4353 case ICE_SW_LKUP_PROMISC:
4354 case ICE_SW_LKUP_PROMISC_VLAN:
4355 ice_remove_promisc(hw, lkup, &remove_list_head);
4357 case ICE_SW_LKUP_MAC_VLAN:
4358 case ICE_SW_LKUP_ETHERTYPE:
4359 case ICE_SW_LKUP_ETHERTYPE_MAC:
4360 case ICE_SW_LKUP_DFLT:
4361 case ICE_SW_LKUP_LAST:
4363 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4368 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4369 list_del(&fm_entry->list_entry);
4370 devm_kfree(ice_hw_to_dev(hw), fm_entry);
4375 * ice_remove_vsi_fltr - Remove all filters for a VSI
4376 * @hw: pointer to the hardware structure
4377 * @vsi_handle: VSI handle to remove filters from
4379 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4381 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4382 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4383 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4384 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4385 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4386 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4387 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4388 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4392 * ice_alloc_res_cntr - allocating resource counter
4393 * @hw: pointer to the hardware structure
4394 * @type: type of resource
4395 * @alloc_shared: if set it is shared else dedicated
4396 * @num_items: number of entries requested for FD resource type
4397 * @counter_id: counter index returned by AQ call
4400 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4403 struct ice_aqc_alloc_free_res_elem *buf;
4407 /* Allocate resource */
4408 buf_len = struct_size(buf, elem, 1);
4409 buf = kzalloc(buf_len, GFP_KERNEL);
4413 buf->num_elems = cpu_to_le16(num_items);
4414 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4415 ICE_AQC_RES_TYPE_M) | alloc_shared);
4417 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4418 ice_aqc_opc_alloc_res, NULL);
4422 *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4430 * ice_free_res_cntr - free resource counter
4431 * @hw: pointer to the hardware structure
4432 * @type: type of resource
4433 * @alloc_shared: if set it is shared else dedicated
4434 * @num_items: number of entries to be freed for FD resource type
4435 * @counter_id: counter ID resource which needs to be freed
4438 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4441 struct ice_aqc_alloc_free_res_elem *buf;
4446 buf_len = struct_size(buf, elem, 1);
4447 buf = kzalloc(buf_len, GFP_KERNEL);
4451 buf->num_elems = cpu_to_le16(num_items);
4452 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4453 ICE_AQC_RES_TYPE_M) | alloc_shared);
4454 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4456 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4457 ice_aqc_opc_free_res, NULL);
4459 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4465 /* This is mapping table entry that maps every word within a given protocol
4466 * structure to the real byte offset as per the specification of that
4468 * for example dst address is 3 words in ethertype header and corresponding
4469 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4470 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4471 * matching entry describing its field. This needs to be updated if new
4472 * structure is added to that union.
4474 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4475 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4476 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4477 { ICE_ETYPE_OL, { 0 } },
4478 { ICE_ETYPE_IL, { 0 } },
4479 { ICE_VLAN_OFOS, { 2, 0 } },
4480 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4481 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4482 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4483 26, 28, 30, 32, 34, 36, 38 } },
4484 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4485 26, 28, 30, 32, 34, 36, 38 } },
4486 { ICE_TCP_IL, { 0, 2 } },
4487 { ICE_UDP_OF, { 0, 2 } },
4488 { ICE_UDP_ILOS, { 0, 2 } },
4489 { ICE_VXLAN, { 8, 10, 12, 14 } },
4490 { ICE_GENEVE, { 8, 10, 12, 14 } },
4491 { ICE_NVGRE, { 0, 2, 4, 6 } },
4492 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
4493 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
4494 { ICE_PPPOE, { 0, 2, 4, 6 } },
4495 { ICE_VLAN_EX, { 2, 0 } },
4496 { ICE_VLAN_IN, { 2, 0 } },
4499 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4500 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4501 { ICE_MAC_IL, ICE_MAC_IL_HW },
4502 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4503 { ICE_ETYPE_IL, ICE_ETYPE_IL_HW },
4504 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4505 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4506 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4507 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4508 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4509 { ICE_TCP_IL, ICE_TCP_IL_HW },
4510 { ICE_UDP_OF, ICE_UDP_OF_HW },
4511 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4512 { ICE_VXLAN, ICE_UDP_OF_HW },
4513 { ICE_GENEVE, ICE_UDP_OF_HW },
4514 { ICE_NVGRE, ICE_GRE_OF_HW },
4515 { ICE_GTP, ICE_UDP_OF_HW },
4516 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
4517 { ICE_PPPOE, ICE_PPPOE_HW },
4518 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
4519 { ICE_VLAN_IN, ICE_VLAN_OL_HW },
4523 * ice_find_recp - find a recipe
4524 * @hw: pointer to the hardware structure
4525 * @lkup_exts: extension sequence to match
4526 * @tun_type: type of recipe tunnel
4528 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4531 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4532 enum ice_sw_tunnel_type tun_type)
4534 bool refresh_required = true;
4535 struct ice_sw_recipe *recp;
4538 /* Walk through existing recipes to find a match */
4539 recp = hw->switch_info->recp_list;
4540 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4541 /* If recipe was not created for this ID, in SW bookkeeping,
4542 * check if FW has an entry for this recipe. If the FW has an
4543 * entry update it in our SW bookkeeping and continue with the
4546 if (!recp[i].recp_created)
4547 if (ice_get_recp_frm_fw(hw,
4548 hw->switch_info->recp_list, i,
4552 /* Skip inverse action recipes */
4553 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4554 ICE_AQ_RECIPE_ACT_INV_ACT)
4557 /* if number of words we are looking for match */
4558 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4559 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4560 struct ice_fv_word *be = lkup_exts->fv_words;
4561 u16 *cr = recp[i].lkup_exts.field_mask;
4562 u16 *de = lkup_exts->field_mask;
4566 /* ar, cr, and qr are related to the recipe words, while
4567 * be, de, and pe are related to the lookup words
4569 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4570 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4572 if (ar[qr].off == be[pe].off &&
4573 ar[qr].prot_id == be[pe].prot_id &&
4575 /* Found the "pe"th word in the
4580 /* After walking through all the words in the
4581 * "i"th recipe if "p"th word was not found then
4582 * this recipe is not what we are looking for.
4583 * So break out from this loop and try the next
4586 if (qr >= recp[i].lkup_exts.n_val_words) {
4591 /* If for "i"th recipe the found was never set to false
4592 * then it means we found our match
4593 * Also tun type of recipe needs to be checked
4595 if (found && recp[i].tun_type == tun_type)
4596 return i; /* Return the recipe ID */
4599 return ICE_MAX_NUM_RECIPES;
4603 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4605 * As protocol id for outer vlan is different in dvm and svm, if dvm is
4606 * supported protocol array record for outer vlan has to be modified to
4607 * reflect the value proper for DVM.
4609 void ice_change_proto_id_to_dvm(void)
4613 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4614 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4615 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4616 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4620 * ice_prot_type_to_id - get protocol ID from protocol type
4621 * @type: protocol type
4622 * @id: pointer to variable that will receive the ID
4624 * Returns true if found, false otherwise
4626 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4630 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4631 if (ice_prot_id_tbl[i].type == type) {
4632 *id = ice_prot_id_tbl[i].protocol_id;
4639 * ice_fill_valid_words - count valid words
4640 * @rule: advanced rule with lookup information
4641 * @lkup_exts: byte offset extractions of the words that are valid
4643 * calculate valid words in a lookup rule using mask value
4646 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4647 struct ice_prot_lkup_ext *lkup_exts)
4649 u8 j, word, prot_id, ret_val;
4651 if (!ice_prot_type_to_id(rule->type, &prot_id))
4654 word = lkup_exts->n_val_words;
4656 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4657 if (((u16 *)&rule->m_u)[j] &&
4658 rule->type < ARRAY_SIZE(ice_prot_ext)) {
4659 /* No more space to accommodate */
4660 if (word >= ICE_MAX_CHAIN_WORDS)
4662 lkup_exts->fv_words[word].off =
4663 ice_prot_ext[rule->type].offs[j];
4664 lkup_exts->fv_words[word].prot_id =
4665 ice_prot_id_tbl[rule->type].protocol_id;
4666 lkup_exts->field_mask[word] =
4667 be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4671 ret_val = word - lkup_exts->n_val_words;
4672 lkup_exts->n_val_words = word;
4678 * ice_create_first_fit_recp_def - Create a recipe grouping
4679 * @hw: pointer to the hardware structure
4680 * @lkup_exts: an array of protocol header extractions
4681 * @rg_list: pointer to a list that stores new recipe groups
4682 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4684 * Using first fit algorithm, take all the words that are still not done
4685 * and start grouping them in 4-word groups. Each group makes up one
4689 ice_create_first_fit_recp_def(struct ice_hw *hw,
4690 struct ice_prot_lkup_ext *lkup_exts,
4691 struct list_head *rg_list,
4694 struct ice_pref_recipe_group *grp = NULL;
4699 /* Walk through every word in the rule to check if it is not done. If so
4700 * then this word needs to be part of a new recipe.
4702 for (j = 0; j < lkup_exts->n_val_words; j++)
4703 if (!test_bit(j, lkup_exts->done)) {
4705 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4706 struct ice_recp_grp_entry *entry;
4708 entry = devm_kzalloc(ice_hw_to_dev(hw),
4713 list_add(&entry->l_entry, rg_list);
4714 grp = &entry->r_group;
4718 grp->pairs[grp->n_val_pairs].prot_id =
4719 lkup_exts->fv_words[j].prot_id;
4720 grp->pairs[grp->n_val_pairs].off =
4721 lkup_exts->fv_words[j].off;
4722 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4730 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4731 * @hw: pointer to the hardware structure
4732 * @fv_list: field vector with the extraction sequence information
4733 * @rg_list: recipe groupings with protocol-offset pairs
4735 * Helper function to fill in the field vector indices for protocol-offset
4736 * pairs. These indexes are then ultimately programmed into a recipe.
4739 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4740 struct list_head *rg_list)
4742 struct ice_sw_fv_list_entry *fv;
4743 struct ice_recp_grp_entry *rg;
4744 struct ice_fv_word *fv_ext;
4746 if (list_empty(fv_list))
4749 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4751 fv_ext = fv->fv_ptr->ew;
4753 list_for_each_entry(rg, rg_list, l_entry) {
4756 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4757 struct ice_fv_word *pr;
4762 pr = &rg->r_group.pairs[i];
4763 mask = rg->r_group.mask[i];
4765 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4766 if (fv_ext[j].prot_id == pr->prot_id &&
4767 fv_ext[j].off == pr->off) {
4770 /* Store index of field vector */
4772 rg->fv_mask[i] = mask;
4776 /* Protocol/offset could not be found, caller gave an
4788 * ice_find_free_recp_res_idx - find free result indexes for recipe
4789 * @hw: pointer to hardware structure
4790 * @profiles: bitmap of profiles that will be associated with the new recipe
4791 * @free_idx: pointer to variable to receive the free index bitmap
4793 * The algorithm used here is:
4794 * 1. When creating a new recipe, create a set P which contains all
4795 * Profiles that will be associated with our new recipe
4797 * 2. For each Profile p in set P:
4798 * a. Add all recipes associated with Profile p into set R
4799 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4800 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4801 * i. Or just assume they all have the same possible indexes:
4803 * i.e., PossibleIndexes = 0x0000F00000000000
4805 * 3. For each Recipe r in set R:
4806 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4807 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4809 * FreeIndexes will contain the bits indicating the indexes free for use,
4810 * then the code needs to update the recipe[r].used_result_idx_bits to
4811 * indicate which indexes were selected for use by this recipe.
4814 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4815 unsigned long *free_idx)
4817 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4818 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4819 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4822 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4823 bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4825 bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4827 /* For each profile we are going to associate the recipe with, add the
4828 * recipes that are associated with that profile. This will give us
4829 * the set of recipes that our recipe may collide with. Also, determine
4830 * what possible result indexes are usable given this set of profiles.
4832 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4833 bitmap_or(recipes, recipes, profile_to_recipe[bit],
4834 ICE_MAX_NUM_RECIPES);
4835 bitmap_and(possible_idx, possible_idx,
4836 hw->switch_info->prof_res_bm[bit],
4840 /* For each recipe that our new recipe may collide with, determine
4841 * which indexes have been used.
4843 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4844 bitmap_or(used_idx, used_idx,
4845 hw->switch_info->recp_list[bit].res_idxs,
4848 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4850 /* return number of free indexes */
4851 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4855 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4856 * @hw: pointer to hardware structure
4857 * @rm: recipe management list entry
4858 * @profiles: bitmap of profiles that will be associated.
4861 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4862 unsigned long *profiles)
4864 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4865 struct ice_aqc_recipe_data_elem *tmp;
4866 struct ice_aqc_recipe_data_elem *buf;
4867 struct ice_recp_grp_entry *entry;
4874 /* When more than one recipe are required, another recipe is needed to
4875 * chain them together. Matching a tunnel metadata ID takes up one of
4876 * the match fields in the chaining recipe reducing the number of
4877 * chained recipes by one.
4879 /* check number of free result indices */
4880 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4881 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4883 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4884 free_res_idx, rm->n_grp_count);
4886 if (rm->n_grp_count > 1) {
4887 if (rm->n_grp_count > free_res_idx)
4893 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
4896 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4900 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4907 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4908 recipe_count = ICE_MAX_NUM_RECIPES;
4909 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4911 if (status || recipe_count == 0)
4914 /* Allocate the recipe resources, and configure them according to the
4915 * match fields from protocol headers and extracted field vectors.
4917 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
4918 list_for_each_entry(entry, &rm->rg_list, l_entry) {
4921 status = ice_alloc_recipe(hw, &entry->rid);
4925 /* Clear the result index of the located recipe, as this will be
4926 * updated, if needed, later in the recipe creation process.
4928 tmp[0].content.result_indx = 0;
4930 buf[recps] = tmp[0];
4931 buf[recps].recipe_indx = (u8)entry->rid;
4932 /* if the recipe is a non-root recipe RID should be programmed
4933 * as 0 for the rules to be applied correctly.
4935 buf[recps].content.rid = 0;
4936 memset(&buf[recps].content.lkup_indx, 0,
4937 sizeof(buf[recps].content.lkup_indx));
4939 /* All recipes use look-up index 0 to match switch ID. */
4940 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4941 buf[recps].content.mask[0] =
4942 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
4943 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4946 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4947 buf[recps].content.lkup_indx[i] = 0x80;
4948 buf[recps].content.mask[i] = 0;
4951 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4952 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4953 buf[recps].content.mask[i + 1] =
4954 cpu_to_le16(entry->fv_mask[i]);
4957 if (rm->n_grp_count > 1) {
4958 /* Checks to see if there really is a valid result index
4961 if (chain_idx >= ICE_MAX_FV_WORDS) {
4962 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
4967 entry->chain_idx = chain_idx;
4968 buf[recps].content.result_indx =
4969 ICE_AQ_RECIPE_RESULT_EN |
4970 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4971 ICE_AQ_RECIPE_RESULT_DATA_M);
4972 clear_bit(chain_idx, result_idx_bm);
4973 chain_idx = find_first_bit(result_idx_bm,
4977 /* fill recipe dependencies */
4978 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
4979 ICE_MAX_NUM_RECIPES);
4980 set_bit(buf[recps].recipe_indx,
4981 (unsigned long *)buf[recps].recipe_bitmap);
4982 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4986 if (rm->n_grp_count == 1) {
4987 rm->root_rid = buf[0].recipe_indx;
4988 set_bit(buf[0].recipe_indx, rm->r_bitmap);
4989 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4990 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4991 memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4992 sizeof(buf[0].recipe_bitmap));
4997 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
4998 * the recipe which is getting created if specified
4999 * by user. Usually any advanced switch filter, which results
5000 * into new extraction sequence, ended up creating a new recipe
5001 * of type ROOT and usually recipes are associated with profiles
5002 * Switch rule referreing newly created recipe, needs to have
5003 * either/or 'fwd' or 'join' priority, otherwise switch rule
5004 * evaluation will not happen correctly. In other words, if
5005 * switch rule to be evaluated on priority basis, then recipe
5006 * needs to have priority, otherwise it will be evaluated last.
5008 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5010 struct ice_recp_grp_entry *last_chain_entry;
5013 /* Allocate the last recipe that will chain the outcomes of the
5014 * other recipes together
5016 status = ice_alloc_recipe(hw, &rid);
5020 buf[recps].recipe_indx = (u8)rid;
5021 buf[recps].content.rid = (u8)rid;
5022 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5023 /* the new entry created should also be part of rg_list to
5024 * make sure we have complete recipe
5026 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5027 sizeof(*last_chain_entry),
5029 if (!last_chain_entry) {
5033 last_chain_entry->rid = rid;
5034 memset(&buf[recps].content.lkup_indx, 0,
5035 sizeof(buf[recps].content.lkup_indx));
5036 /* All recipes use look-up index 0 to match switch ID. */
5037 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5038 buf[recps].content.mask[0] =
5039 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5040 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5041 buf[recps].content.lkup_indx[i] =
5042 ICE_AQ_RECIPE_LKUP_IGNORE;
5043 buf[recps].content.mask[i] = 0;
5047 /* update r_bitmap with the recp that is used for chaining */
5048 set_bit(rid, rm->r_bitmap);
5049 /* this is the recipe that chains all the other recipes so it
5050 * should not have a chaining ID to indicate the same
5052 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5053 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5054 last_chain_entry->fv_idx[i] = entry->chain_idx;
5055 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5056 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
5057 set_bit(entry->rid, rm->r_bitmap);
5059 list_add(&last_chain_entry->l_entry, &rm->rg_list);
5060 if (sizeof(buf[recps].recipe_bitmap) >=
5061 sizeof(rm->r_bitmap)) {
5062 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5063 sizeof(buf[recps].recipe_bitmap));
5068 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5071 rm->root_rid = (u8)rid;
5073 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5077 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5078 ice_release_change_lock(hw);
5082 /* Every recipe that just got created add it to the recipe
5085 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5086 struct ice_switch_info *sw = hw->switch_info;
5087 bool is_root, idx_found = false;
5088 struct ice_sw_recipe *recp;
5089 u16 idx, buf_idx = 0;
5091 /* find buffer index for copying some data */
5092 for (idx = 0; idx < rm->n_grp_count; idx++)
5093 if (buf[idx].recipe_indx == entry->rid) {
5103 recp = &sw->recp_list[entry->rid];
5104 is_root = (rm->root_rid == entry->rid);
5105 recp->is_root = is_root;
5107 recp->root_rid = entry->rid;
5108 recp->big_recp = (is_root && rm->n_grp_count > 1);
5110 memcpy(&recp->ext_words, entry->r_group.pairs,
5111 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5113 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5114 sizeof(recp->r_bitmap));
5116 /* Copy non-result fv index values and masks to recipe. This
5117 * call will also update the result recipe bitmask.
5119 ice_collect_result_idx(&buf[buf_idx], recp);
5121 /* for non-root recipes, also copy to the root, this allows
5122 * easier matching of a complete chained recipe
5125 ice_collect_result_idx(&buf[buf_idx],
5126 &sw->recp_list[rm->root_rid]);
5128 recp->n_ext_words = entry->r_group.n_val_pairs;
5129 recp->chain_idx = entry->chain_idx;
5130 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5131 recp->n_grp_count = rm->n_grp_count;
5132 recp->tun_type = rm->tun_type;
5133 recp->recp_created = true;
5142 devm_kfree(ice_hw_to_dev(hw), buf);
5147 * ice_create_recipe_group - creates recipe group
5148 * @hw: pointer to hardware structure
5149 * @rm: recipe management list entry
5150 * @lkup_exts: lookup elements
5153 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5154 struct ice_prot_lkup_ext *lkup_exts)
5159 rm->n_grp_count = 0;
5161 /* Create recipes for words that are marked not done by packing them
5164 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5165 &rm->rg_list, &recp_count);
5167 rm->n_grp_count += recp_count;
5168 rm->n_ext_words = lkup_exts->n_val_words;
5169 memcpy(&rm->ext_words, lkup_exts->fv_words,
5170 sizeof(rm->ext_words));
5171 memcpy(rm->word_masks, lkup_exts->field_mask,
5172 sizeof(rm->word_masks));
5179 * ice_tun_type_match_word - determine if tun type needs a match mask
5180 * @tun_type: tunnel type
5181 * @mask: mask to be used for the tunnel
5183 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
5186 case ICE_SW_TUN_GENEVE:
5187 case ICE_SW_TUN_VXLAN:
5188 case ICE_SW_TUN_NVGRE:
5189 case ICE_SW_TUN_GTPU:
5190 case ICE_SW_TUN_GTPC:
5191 *mask = ICE_TUN_FLAG_MASK;
5201 * ice_add_special_words - Add words that are not protocols, such as metadata
5202 * @rinfo: other information regarding the rule e.g. priority and action info
5203 * @lkup_exts: lookup word structure
5204 * @dvm_ena: is double VLAN mode enabled
5207 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5208 struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
5212 /* If this is a tunneled packet, then add recipe index to match the
5213 * tunnel bit in the packet metadata flags.
5215 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
5216 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5217 u8 word = lkup_exts->n_val_words++;
5219 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5220 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
5221 lkup_exts->field_mask[word] = mask;
5227 if (rinfo->vlan_type != 0 && dvm_ena) {
5228 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5229 u8 word = lkup_exts->n_val_words++;
5231 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5232 lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
5233 lkup_exts->field_mask[word] =
5234 ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
5243 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5244 * @hw: pointer to hardware structure
5245 * @rinfo: other information regarding the rule e.g. priority and action info
5246 * @bm: pointer to memory for returning the bitmap of field vectors
5249 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5252 enum ice_prof_type prof_type;
5254 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5256 switch (rinfo->tun_type) {
5258 prof_type = ICE_PROF_NON_TUN;
5260 case ICE_ALL_TUNNELS:
5261 prof_type = ICE_PROF_TUN_ALL;
5263 case ICE_SW_TUN_GENEVE:
5264 case ICE_SW_TUN_VXLAN:
5265 prof_type = ICE_PROF_TUN_UDP;
5267 case ICE_SW_TUN_NVGRE:
5268 prof_type = ICE_PROF_TUN_GRE;
5270 case ICE_SW_TUN_GTPU:
5271 prof_type = ICE_PROF_TUN_GTPU;
5273 case ICE_SW_TUN_GTPC:
5274 prof_type = ICE_PROF_TUN_GTPC;
5276 case ICE_SW_TUN_AND_NON_TUN:
5278 prof_type = ICE_PROF_ALL;
5282 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5286 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5287 * @hw: pointer to hardware structure
5288 * @lkups: lookup elements or match criteria for the advanced recipe, one
5289 * structure per protocol header
5290 * @lkups_cnt: number of protocols
5291 * @rinfo: other information regarding the rule e.g. priority and action info
5292 * @rid: return the recipe ID of the recipe created
5295 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5296 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5298 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5299 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5300 struct ice_prot_lkup_ext *lkup_exts;
5301 struct ice_recp_grp_entry *r_entry;
5302 struct ice_sw_fv_list_entry *fvit;
5303 struct ice_recp_grp_entry *r_tmp;
5304 struct ice_sw_fv_list_entry *tmp;
5305 struct ice_sw_recipe *rm;
5312 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5316 /* Determine the number of words to be matched and if it exceeds a
5317 * recipe's restrictions
5319 for (i = 0; i < lkups_cnt; i++) {
5322 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5324 goto err_free_lkup_exts;
5327 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5330 goto err_free_lkup_exts;
5334 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5337 goto err_free_lkup_exts;
5340 /* Get field vectors that contain fields extracted from all the protocol
5341 * headers being programmed.
5343 INIT_LIST_HEAD(&rm->fv_list);
5344 INIT_LIST_HEAD(&rm->rg_list);
5346 /* Get bitmap of field vectors (profiles) that are compatible with the
5347 * rule request; only these will be searched in the subsequent call to
5348 * ice_get_sw_fv_list.
5350 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5352 status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5356 /* Create any special protocol/offset pairs, such as looking at tunnel
5357 * bits by extracting metadata
5359 status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
5361 goto err_free_lkup_exts;
5363 /* Group match words into recipes using preferred recipe grouping
5366 status = ice_create_recipe_group(hw, rm, lkup_exts);
5370 /* set the recipe priority if specified */
5371 rm->priority = (u8)rinfo->priority;
5373 /* Find offsets from the field vector. Pick the first one for all the
5376 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5380 /* get bitmap of all profiles the recipe will be associated with */
5381 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5382 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5383 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5384 set_bit((u16)fvit->profile_id, profiles);
5387 /* Look for a recipe which matches our requested fv / mask list */
5388 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5389 if (*rid < ICE_MAX_NUM_RECIPES)
5390 /* Success if found a recipe that match the existing criteria */
5393 rm->tun_type = rinfo->tun_type;
5394 /* Recipe we need does not exist, add a recipe */
5395 status = ice_add_sw_recipe(hw, rm, profiles);
5399 /* Associate all the recipes created with all the profiles in the
5400 * common field vector.
5402 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5403 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5406 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5407 (u8 *)r_bitmap, NULL);
5411 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5412 ICE_MAX_NUM_RECIPES);
5413 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5417 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5420 ice_release_change_lock(hw);
5425 /* Update profile to recipe bitmap array */
5426 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5427 ICE_MAX_NUM_RECIPES);
5429 /* Update recipe to profile bitmap array */
5430 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5431 set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5434 *rid = rm->root_rid;
5435 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5436 sizeof(*lkup_exts));
5438 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5439 list_del(&r_entry->l_entry);
5440 devm_kfree(ice_hw_to_dev(hw), r_entry);
5443 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5444 list_del(&fvit->list_entry);
5445 devm_kfree(ice_hw_to_dev(hw), fvit);
5449 devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5460 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
5462 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
5463 * @num_vlan: number of VLAN tags
5465 static struct ice_dummy_pkt_profile *
5466 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
5469 struct ice_dummy_pkt_profile *profile;
5470 struct ice_dummy_pkt_offsets *offsets;
5471 u32 buf_len, off, etype_off, i;
5474 if (num_vlan < 1 || num_vlan > 2)
5475 return ERR_PTR(-EINVAL);
5477 off = num_vlan * VLAN_HLEN;
5479 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
5480 dummy_pkt->offsets_len;
5481 offsets = kzalloc(buf_len, GFP_KERNEL);
5483 return ERR_PTR(-ENOMEM);
5485 offsets[0] = dummy_pkt->offsets[0];
5486 if (num_vlan == 2) {
5487 offsets[1] = ice_dummy_qinq_packet_offsets[0];
5488 offsets[2] = ice_dummy_qinq_packet_offsets[1];
5489 } else if (num_vlan == 1) {
5490 offsets[1] = ice_dummy_vlan_packet_offsets[0];
5493 for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5494 offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
5495 offsets[i + num_vlan].offset =
5496 dummy_pkt->offsets[i].offset + off;
5498 offsets[i + num_vlan] = dummy_pkt->offsets[i];
5500 etype_off = dummy_pkt->offsets[1].offset;
5502 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
5504 pkt = kzalloc(buf_len, GFP_KERNEL);
5507 return ERR_PTR(-ENOMEM);
5510 memcpy(pkt, dummy_pkt->pkt, etype_off);
5511 memcpy(pkt + etype_off,
5512 num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
5514 memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
5515 dummy_pkt->pkt_len - etype_off);
5517 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
5521 return ERR_PTR(-ENOMEM);
5524 profile->offsets = offsets;
5526 profile->pkt_len = buf_len;
5527 profile->match |= ICE_PKT_KMALLOC;
5533 * ice_find_dummy_packet - find dummy packet
5535 * @lkups: lookup elements or match criteria for the advanced recipe, one
5536 * structure per protocol header
5537 * @lkups_cnt: number of protocols
5538 * @tun_type: tunnel type
5540 * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5542 static const struct ice_dummy_pkt_profile *
5543 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5544 enum ice_sw_tunnel_type tun_type)
5546 const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5547 u32 match = 0, vlan_count = 0;
5551 case ICE_SW_TUN_GTPC:
5552 match |= ICE_PKT_TUN_GTPC;
5554 case ICE_SW_TUN_GTPU:
5555 match |= ICE_PKT_TUN_GTPU;
5557 case ICE_SW_TUN_NVGRE:
5558 match |= ICE_PKT_TUN_NVGRE;
5560 case ICE_SW_TUN_GENEVE:
5561 case ICE_SW_TUN_VXLAN:
5562 match |= ICE_PKT_TUN_UDP;
5568 for (i = 0; i < lkups_cnt; i++) {
5569 if (lkups[i].type == ICE_UDP_ILOS)
5570 match |= ICE_PKT_INNER_UDP;
5571 else if (lkups[i].type == ICE_TCP_IL)
5572 match |= ICE_PKT_INNER_TCP;
5573 else if (lkups[i].type == ICE_IPV6_OFOS)
5574 match |= ICE_PKT_OUTER_IPV6;
5575 else if (lkups[i].type == ICE_VLAN_OFOS ||
5576 lkups[i].type == ICE_VLAN_EX)
5578 else if (lkups[i].type == ICE_VLAN_IN)
5580 else if (lkups[i].type == ICE_ETYPE_OL &&
5581 lkups[i].h_u.ethertype.ethtype_id ==
5582 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5583 lkups[i].m_u.ethertype.ethtype_id ==
5584 cpu_to_be16(0xFFFF))
5585 match |= ICE_PKT_OUTER_IPV6;
5586 else if (lkups[i].type == ICE_ETYPE_IL &&
5587 lkups[i].h_u.ethertype.ethtype_id ==
5588 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5589 lkups[i].m_u.ethertype.ethtype_id ==
5590 cpu_to_be16(0xFFFF))
5591 match |= ICE_PKT_INNER_IPV6;
5592 else if (lkups[i].type == ICE_IPV6_IL)
5593 match |= ICE_PKT_INNER_IPV6;
5594 else if (lkups[i].type == ICE_GTP_NO_PAY)
5595 match |= ICE_PKT_GTP_NOPAY;
5596 else if (lkups[i].type == ICE_PPPOE) {
5597 match |= ICE_PKT_PPPOE;
5598 if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5600 match |= ICE_PKT_OUTER_IPV6;
5604 while (ret->match && (match & ret->match) != ret->match)
5607 if (vlan_count != 0)
5608 ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5614 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5616 * @lkups: lookup elements or match criteria for the advanced recipe, one
5617 * structure per protocol header
5618 * @lkups_cnt: number of protocols
5619 * @s_rule: stores rule information from the match criteria
5620 * @profile: dummy packet profile (the template, its size and header offsets)
5623 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5624 struct ice_sw_rule_lkup_rx_tx *s_rule,
5625 const struct ice_dummy_pkt_profile *profile)
5630 /* Start with a packet with a pre-defined/dummy content. Then, fill
5631 * in the header values to be looked up or matched.
5633 pkt = s_rule->hdr_data;
5635 memcpy(pkt, profile->pkt, profile->pkt_len);
5637 for (i = 0; i < lkups_cnt; i++) {
5638 const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5639 enum ice_protocol_type type;
5640 u16 offset = 0, len = 0, j;
5643 /* find the start of this layer; it should be found since this
5644 * was already checked when search for the dummy packet
5646 type = lkups[i].type;
5647 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5648 if (type == offsets[j].type) {
5649 offset = offsets[j].offset;
5654 /* this should never happen in a correct calling sequence */
5658 switch (lkups[i].type) {
5661 len = sizeof(struct ice_ether_hdr);
5665 len = sizeof(struct ice_ethtype_hdr);
5670 len = sizeof(struct ice_vlan_hdr);
5674 len = sizeof(struct ice_ipv4_hdr);
5678 len = sizeof(struct ice_ipv6_hdr);
5683 len = sizeof(struct ice_l4_hdr);
5686 len = sizeof(struct ice_sctp_hdr);
5689 len = sizeof(struct ice_nvgre_hdr);
5693 len = sizeof(struct ice_udp_tnl_hdr);
5695 case ICE_GTP_NO_PAY:
5697 len = sizeof(struct ice_udp_gtp_hdr);
5700 len = sizeof(struct ice_pppoe_hdr);
5706 /* the length should be a word multiple */
5707 if (len % ICE_BYTES_PER_WORD)
5710 /* We have the offset to the header start, the length, the
5711 * caller's header values and mask. Use this information to
5712 * copy the data into the dummy packet appropriately based on
5713 * the mask. Note that we need to only write the bits as
5714 * indicated by the mask to make sure we don't improperly write
5715 * over any significant packet data.
5717 for (j = 0; j < len / sizeof(u16); j++) {
5718 u16 *ptr = (u16 *)(pkt + offset);
5719 u16 mask = lkups[i].m_raw[j];
5724 ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5728 s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5734 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5735 * @hw: pointer to the hardware structure
5736 * @tun_type: tunnel type
5737 * @pkt: dummy packet to fill in
5738 * @offsets: offset info for the dummy packet
5741 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5742 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5747 case ICE_SW_TUN_VXLAN:
5748 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5751 case ICE_SW_TUN_GENEVE:
5752 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5756 /* Nothing needs to be done for this tunnel type */
5760 /* Find the outer UDP protocol header and insert the port number */
5761 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5762 if (offsets[i].type == ICE_UDP_OF) {
5763 struct ice_l4_hdr *hdr;
5766 offset = offsets[i].offset;
5767 hdr = (struct ice_l4_hdr *)&pkt[offset];
5768 hdr->dst_port = cpu_to_be16(open_port);
5778 * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5779 * @vlan_type: VLAN tag type
5780 * @pkt: dummy packet to fill in
5781 * @offsets: offset info for the dummy packet
5784 ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
5785 const struct ice_dummy_pkt_offsets *offsets)
5789 /* Find VLAN header and insert VLAN TPID */
5790 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5791 if (offsets[i].type == ICE_VLAN_OFOS ||
5792 offsets[i].type == ICE_VLAN_EX) {
5793 struct ice_vlan_hdr *hdr;
5796 offset = offsets[i].offset;
5797 hdr = (struct ice_vlan_hdr *)&pkt[offset];
5798 hdr->type = cpu_to_be16(vlan_type);
5808 * ice_find_adv_rule_entry - Search a rule entry
5809 * @hw: pointer to the hardware structure
5810 * @lkups: lookup elements or match criteria for the advanced recipe, one
5811 * structure per protocol header
5812 * @lkups_cnt: number of protocols
5813 * @recp_id: recipe ID for which we are finding the rule
5814 * @rinfo: other information regarding the rule e.g. priority and action info
5816 * Helper function to search for a given advance rule entry
5817 * Returns pointer to entry storing the rule if found
5819 static struct ice_adv_fltr_mgmt_list_entry *
5820 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5821 u16 lkups_cnt, u16 recp_id,
5822 struct ice_adv_rule_info *rinfo)
5824 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5825 struct ice_switch_info *sw = hw->switch_info;
5828 list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5830 bool lkups_matched = true;
5832 if (lkups_cnt != list_itr->lkups_cnt)
5834 for (i = 0; i < list_itr->lkups_cnt; i++)
5835 if (memcmp(&list_itr->lkups[i], &lkups[i],
5837 lkups_matched = false;
5840 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5841 rinfo->tun_type == list_itr->rule_info.tun_type &&
5842 rinfo->vlan_type == list_itr->rule_info.vlan_type &&
5850 * ice_adv_add_update_vsi_list
5851 * @hw: pointer to the hardware structure
5852 * @m_entry: pointer to current adv filter management list entry
5853 * @cur_fltr: filter information from the book keeping entry
5854 * @new_fltr: filter information with the new VSI to be added
5856 * Call AQ command to add or update previously created VSI list with new VSI.
5858 * Helper function to do book keeping associated with adding filter information
5859 * The algorithm to do the booking keeping is described below :
5860 * When a VSI needs to subscribe to a given advanced filter
5861 * if only one VSI has been added till now
5862 * Allocate a new VSI list and add two VSIs
5863 * to this list using switch rule command
5864 * Update the previously created switch rule with the
5865 * newly created VSI list ID
5866 * if a VSI list was previously created
5867 * Add the new VSI to the previously created VSI list set
5868 * using the update switch rule command
5871 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5872 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5873 struct ice_adv_rule_info *cur_fltr,
5874 struct ice_adv_rule_info *new_fltr)
5876 u16 vsi_list_id = 0;
5879 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5880 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5881 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5884 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5885 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5886 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5887 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5890 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5891 /* Only one entry existed in the mapping and it was not already
5892 * a part of a VSI list. So, create a VSI list with the old and
5895 struct ice_fltr_info tmp_fltr;
5896 u16 vsi_handle_arr[2];
5898 /* A rule already exists with the new VSI being added */
5899 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5900 new_fltr->sw_act.fwd_id.hw_vsi_id)
5903 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5904 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5905 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5911 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5912 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5913 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5914 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5915 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5916 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5918 /* Update the previous switch rule of "forward to VSI" to
5921 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5925 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5926 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5927 m_entry->vsi_list_info =
5928 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5931 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5933 if (!m_entry->vsi_list_info)
5936 /* A rule already exists with the new VSI being added */
5937 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5940 /* Update the previously created VSI list set with
5941 * the new VSI ID passed in
5943 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5945 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5947 ice_aqc_opc_update_sw_rules,
5949 /* update VSI list mapping info with new VSI ID */
5951 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5954 m_entry->vsi_count++;
/* NOTE(review): this excerpt carries stray leading line numbers in its
 * content and is missing interleaved lines (blank lines, some braces,
 * status checks). Annotations below describe only what is visible;
 * reconcile against the upstream ice_switch.c before relying on them.
 */
5959 * ice_add_adv_rule - helper function to create an advanced switch rule
5960 * @hw: pointer to the hardware structure
5961 * @lkups: information on the words that needs to be looked up. All words
5962 * together makes one recipe
5963 * @lkups_cnt: num of entries in the lkups array
5964 * @rinfo: other information related to the rule that needs to be programmed
5965 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5966 * ignored is case of error.
5968 * This function can program only 1 rule at a time. The lkups is used to
5969 * describe the all the words that forms the "lookup" portion of the recipe.
5970 * These words can span multiple protocols. Callers to this function need to
5971 * pass in a list of protocol headers with lookup information along and mask
5972 * that determines which words are valid from the given protocol header.
5973 * rinfo describes other information related to this rule such as forwarding
5974 * IDs, priority of this rule, etc.
5977 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5978 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5979 struct ice_rule_query_data *added_entry)
5981 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5982 struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
5983 const struct ice_dummy_pkt_profile *profile;
5984 u16 rid = 0, i, rule_buf_sz, vsi_handle;
5985 struct list_head *rule_head;
5986 struct ice_switch_info *sw;
/* One-time lazy init of the profile-to-result-index bitmap. */
5992 /* Initialize profile to result index bitmap */
5993 if (!hw->switch_info->prof_res_bm_init) {
5994 hw->switch_info->prof_res_bm_init = 1;
5995 ice_init_prof_result_bm(hw);
/* Count the valid (non-zero-mask) lookup words; a recipe cannot chain
 * more than ICE_MAX_CHAIN_WORDS of them.
 */
6001 /* get # of words we need to match */
6003 for (i = 0; i < lkups_cnt; i++) {
6006 for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
6007 if (lkups[i].m_raw[j])
6014 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6017 /* locate a dummy packet */
6018 profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
6019 if (IS_ERR(profile))
6020 return PTR_ERR(profile);
/* Only forwarding/drop actions are supported for advanced rules. */
6022 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6023 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6024 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6025 rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
6027 goto free_pkt_profile;
6030 vsi_handle = rinfo->sw_act.vsi_handle;
6031 if (!ice_is_vsi_valid(hw, vsi_handle)) {
6033 goto free_pkt_profile;
/* Resolve the driver VSI handle to the HW VSI number for the action,
 * and for the Tx source when this is a Tx-direction filter.
 */
6036 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6037 rinfo->sw_act.fwd_id.hw_vsi_id =
6038 ice_get_hw_vsi_num(hw, vsi_handle);
6039 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6040 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6042 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6044 goto free_pkt_profile;
/* If an identical rule already exists, fold this VSI into the existing
 * entry (possibly converting it to a VSI-list rule) instead of
 * programming a duplicate.
 */
6045 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6047 /* we have to add VSI to VSI_LIST and increment vsi_count.
6048 * Also Update VSI list so that we can change forwarding rule
6049 * if the rule already exists, we will check if it exists with
6050 * same vsi_id, if not then add it to the VSI list if it already
6051 * exists if not then create a VSI list and add the existing VSI
6052 * ID and the new VSI ID to the list
6053 * We will add that VSI to the list
6055 status = ice_adv_add_update_vsi_list(hw, m_entry,
6056 &m_entry->rule_info,
6059 added_entry->rid = rid;
6060 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6061 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6063 goto free_pkt_profile;
/* New rule: allocate a switch-rule buffer large enough to carry the
 * dummy packet header template.
 */
6065 rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6066 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6069 goto free_pkt_profile;
/* Default to LAN + loopback enable unless the caller supplied explicit
 * action flags via flags_info.
 */
6071 if (!rinfo->flags_info.act_valid) {
6072 act |= ICE_SINGLE_ACT_LAN_ENABLE;
6073 act |= ICE_SINGLE_ACT_LB_ENABLE;
6075 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6076 ICE_SINGLE_ACT_LB_ENABLE);
/* Encode the requested filter action into the single-action word. */
6079 switch (rinfo->sw_act.fltr_act) {
6080 case ICE_FWD_TO_VSI:
6081 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6082 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6083 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6086 act |= ICE_SINGLE_ACT_TO_Q;
6087 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6088 ICE_SINGLE_ACT_Q_INDEX_M;
6090 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as a power-of-two region. */
6091 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6092 (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6093 act |= ICE_SINGLE_ACT_TO_Q;
6094 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6095 ICE_SINGLE_ACT_Q_INDEX_M;
6096 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6097 ICE_SINGLE_ACT_Q_REGION_M;
6099 case ICE_DROP_PACKET:
6100 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6101 ICE_SINGLE_ACT_VALID_BIT;
6105 goto err_ice_add_adv_rule;
6108 /* set the rule LOOKUP type based on caller specified 'Rx'
6109 * instead of hardcoding it to be either LOOKUP_TX/RX
6111 * for 'Rx' set the source to be the port number
6112 * for 'Tx' set the source to be the source HW VSI number (determined
6116 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6117 s_rule->src = cpu_to_le16(hw->port_info->lport);
6119 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6120 s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6123 s_rule->recipe_id = cpu_to_le16(rid);
6124 s_rule->act = cpu_to_le32(act);
/* Overlay caller lookup values/masks onto the dummy packet template. */
6126 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6128 goto err_ice_add_adv_rule;
/* Tunnel rules need the tunnel-specific fields patched into the
 * template as well.
 */
6130 if (rinfo->tun_type != ICE_NON_TUN &&
6131 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6132 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6136 goto err_ice_add_adv_rule;
/* In double-VLAN mode, patch the requested VLAN TPID too. */
6139 if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
6140 status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
6144 goto err_ice_add_adv_rule;
/* Program the rule in HW via the admin queue. */
6147 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6148 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6151 goto err_ice_add_adv_rule;
/* Book-keeping entry so the rule can later be found, removed and
 * replayed; owns a copy of the caller's lookup array.
 */
6152 adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6153 sizeof(struct ice_adv_fltr_mgmt_list_entry),
6157 goto err_ice_add_adv_rule;
6160 adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6161 lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6162 if (!adv_fltr->lkups) {
6164 goto err_ice_add_adv_rule;
6167 adv_fltr->lkups_cnt = lkups_cnt;
6168 adv_fltr->rule_info = *rinfo;
6169 adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6170 sw = hw->switch_info;
6171 sw->recp_list[rid].adv_rule = true;
6172 rule_head = &sw->recp_list[rid].filt_rules;
6174 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6175 adv_fltr->vsi_count = 1;
6177 /* Add rule entry to book keeping list */
6178 list_add(&adv_fltr->list_entry, rule_head);
6180 added_entry->rid = rid;
6181 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6182 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Unwind partial book-keeping on failure (HW rule removal, if any,
 * appears to be handled in lines not visible in this excerpt).
 */
6184 err_ice_add_adv_rule:
6185 if (status && adv_fltr) {
6186 devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6187 devm_kfree(ice_hw_to_dev(hw), adv_fltr);
/* Profiles built at runtime (ICE_PKT_KMALLOC) own their offset/packet
 * buffers; static template profiles must not be freed.
 */
6193 if (profile->match & ICE_PKT_KMALLOC) {
6194 kfree(profile->offsets);
6195 kfree(profile->pkt);
/* NOTE(review): excerpt carries stray line numbers and is missing some
 * interleaved lines (braces, early returns, status checks).
 */
6203 * ice_replay_vsi_fltr - Replay filters for requested VSI
6204 * @hw: pointer to the hardware structure
6205 * @vsi_handle: driver VSI handle
6206 * @recp_id: Recipe ID for which rules need to be replayed
6207 * @list_head: list for which filters need to be replayed
6209 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6210 * It is required to pass valid VSI handle.
6213 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6214 struct list_head *list_head)
6216 struct ice_fltr_mgmt_list_entry *itr;
/* Nothing to replay for an empty bookkeeping list. */
6220 if (list_empty(list_head))
6222 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6224 list_for_each_entry(itr, list_head, list_entry) {
6225 struct ice_fltr_list_entry f_entry;
6227 f_entry.fltr_info = itr->fltr_info;
/* Simple (non-VSI-list, non-VLAN) rule owned by this VSI: re-add
 * it directly.
 */
6228 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6229 itr->fltr_info.vsi_handle == vsi_handle) {
6230 /* update the src in case it is VSI num */
6231 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6232 f_entry.fltr_info.src = hw_vsi_id;
6233 status = ice_add_rule_internal(hw, recp_id, &f_entry);
/* VSI-list rule: only replay if this VSI is a member of the list. */
6238 if (!itr->vsi_list_info ||
6239 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6241 /* Clearing it so that the logic can add it back */
6242 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6243 f_entry.fltr_info.vsi_handle = vsi_handle;
6244 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6245 /* update the src in case it is VSI num */
6246 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6247 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN rules have their own add path with pruning handling. */
6248 if (recp_id == ICE_SW_LKUP_VLAN)
6249 status = ice_add_vlan_internal(hw, &f_entry);
6251 status = ice_add_rule_internal(hw, recp_id, &f_entry);
/* NOTE(review): excerpt carries stray line numbers and is missing some
 * interleaved lines (status checks, braces).
 */
6260 * ice_adv_rem_update_vsi_list
6261 * @hw: pointer to the hardware structure
6262 * @vsi_handle: VSI handle of the VSI to remove
6263 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes @vsi_handle from the VSI list referenced by an advanced
 * FWD_TO_VSI_LIST rule. When only one VSI remains afterwards, the rule
 * is converted back to a plain FWD_TO_VSI rule and the now-unneeded
 * VSI list is torn down.
 */
6267 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6268 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6270 struct ice_vsi_list_map_info *vsi_list_info;
6271 enum ice_sw_lkup_type lkup_type;
/* Only meaningful for a non-empty VSI-list rule. */
6275 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6276 fm_list->vsi_count == 0)
6279 /* A rule with the VSI being removed does not exist */
6280 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6283 lkup_type = ICE_SW_LKUP_LAST;
6284 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* 'true' = remove the VSI from the HW VSI list. */
6285 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6286 ice_aqc_opc_update_sw_rules,
6291 fm_list->vsi_count--;
6292 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6293 vsi_list_info = fm_list->vsi_list_info;
/* One member left: collapse the list rule back to FWD_TO_VSI. */
6294 if (fm_list->vsi_count == 1) {
6295 struct ice_fltr_info tmp_fltr;
6298 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6300 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6303 /* Make sure VSI list is empty before removing it below */
6304 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6306 ice_aqc_opc_update_sw_rules,
6311 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6312 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6313 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6314 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6315 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6316 tmp_fltr.fwd_id.hw_vsi_id =
6317 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6318 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6319 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6320 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6322 /* Update the previous switch rule of "MAC forward to VSI" to
6323 * "MAC fwd to VSI list"
6325 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6327 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6328 tmp_fltr.fwd_id.hw_vsi_id, status);
6331 fm_list->vsi_list_info->ref_cnt--;
6333 /* Remove the VSI list since it is no longer used */
6334 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6336 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6337 vsi_list_id, status);
/* Drop the driver-side list mapping as well. */
6341 list_del(&vsi_list_info->list_entry);
6342 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
6343 fm_list->vsi_list_info = NULL;
/* NOTE(review): excerpt carries stray line numbers and is missing some
 * interleaved lines (braces, status checks, returns).
 */
6350 * ice_rem_adv_rule - removes existing advanced switch rule
6351 * @hw: pointer to the hardware structure
6352 * @lkups: information on the words that needs to be looked up. All words
6353 * together makes one recipe
6354 * @lkups_cnt: num of entries in the lkups array
6355 * @rinfo: Its the pointer to the rule information for the rule
6357 * This function can be used to remove 1 rule at a time. The lkups is
6358 * used to describe all the words that forms the "lookup" portion of the
6359 * rule. These words can span multiple protocols. Callers to this function
6360 * need to pass in a list of protocol headers with lookup information along
6361 * and mask that determines which words are valid from the given protocol
6362 * header. rinfo describes other information related to this rule such as
6363 * forwarding IDs, priority of this rule, etc.
6366 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6367 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6369 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6370 struct ice_prot_lkup_ext lkup_exts;
6371 bool remove_rule = false;
6372 struct mutex *rule_lock; /* Lock to protect filter rule list */
6373 u16 i, rid, vsi_handle;
/* Rebuild the extraction words from the caller's lookups so the
 * matching recipe can be located.
 */
6376 memset(&lkup_exts, 0, sizeof(lkup_exts));
6377 for (i = 0; i < lkups_cnt; i++) {
6380 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6383 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6388 /* Create any special protocol/offset pairs, such as looking at tunnel
6389 * bits by extracting metadata
6391 status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
6395 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
6396 /* If did not find a recipe that match the existing criteria */
6397 if (rid == ICE_MAX_NUM_RECIPES)
6400 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6401 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6402 /* the rule is already removed */
6405 mutex_lock(rule_lock);
/* Decide whether the HW rule itself must be deleted or only a VSI
 * needs to be detached from its VSI list.
 */
6406 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6408 } else if (list_elem->vsi_count > 1) {
/* More than one VSI on the list: detach this VSI only. */
6409 remove_rule = false;
6410 vsi_handle = rinfo->sw_act.vsi_handle;
6411 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
/* NOTE(review): this else-branch looks identical to the one above;
 * the distinguishing lines (e.g. remove_rule = true and a status
 * check) appear to be missing from this excerpt — confirm against
 * the upstream source.
 */
6413 vsi_handle = rinfo->sw_act.vsi_handle;
6414 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6416 mutex_unlock(rule_lock);
6419 if (list_elem->vsi_count == 0)
6422 mutex_unlock(rule_lock);
6424 struct ice_sw_rule_lkup_rx_tx *s_rule;
/* Remove path: no packet header needed for a delete request. */
6427 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6428 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6432 s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6433 s_rule->hdr_len = 0;
6434 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6436 ice_aqc_opc_remove_sw_rules, NULL);
/* -ENOENT means HW no longer has the rule; still purge bookkeeping. */
6437 if (!status || status == -ENOENT) {
6438 struct ice_switch_info *sw = hw->switch_info;
6440 mutex_lock(rule_lock);
6441 list_del(&list_elem->list_entry);
6442 devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6443 devm_kfree(ice_hw_to_dev(hw), list_elem);
6444 mutex_unlock(rule_lock);
6445 if (list_empty(&sw->recp_list[rid].filt_rules))
6446 sw->recp_list[rid].adv_rule = false;
/* NOTE(review): excerpt carries stray line numbers and is missing some
 * interleaved lines (braces, returns).
 */
6454 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6455 * @hw: pointer to the hardware structure
6456 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6458 * This function is used to remove 1 rule at a time. The removal is based on
6459 * the remove_entry parameter. This function will remove rule for a given
6460 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6463 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6464 struct ice_rule_query_data *remove_entry)
6466 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6467 struct list_head *list_head;
6468 struct ice_adv_rule_info rinfo;
6469 struct ice_switch_info *sw;
6471 sw = hw->switch_info;
/* Recipe must have been created before any of its rules can exist. */
6472 if (!sw->recp_list[remove_entry->rid].recp_created)
6474 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Linear search of the recipe's rule list by fltr_rule_id. */
6475 list_for_each_entry(list_itr, list_head, list_entry) {
6476 if (list_itr->rule_info.fltr_rule_id ==
6477 remove_entry->rule_id) {
/* Use the stored rule info but target the caller's VSI. */
6478 rinfo = list_itr->rule_info;
6479 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6480 return ice_rem_adv_rule(hw, list_itr->lkups,
6481 list_itr->lkups_cnt, &rinfo);
6484 /* either list is empty or unable to find rule */
/* NOTE(review): excerpt carries stray line numbers and is missing some
 * interleaved lines (braces, continue/return statements).
 */
6489 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6491 * @hw: pointer to the hardware structure
6492 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6494 * This function is used to remove all the rules for a given VSI and as soon
6495 * as removing a rule fails, it will return immediately with the error code,
6496 * else it will return success.
6498 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6500 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6501 struct ice_vsi_list_map_info *map_info;
6502 struct ice_adv_rule_info rinfo;
6503 struct list_head *list_head;
6504 struct ice_switch_info *sw;
6508 sw = hw->switch_info;
/* Walk every recipe that holds advanced rules. */
6509 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6510 if (!sw->recp_list[rid].recp_created)
6512 if (!sw->recp_list[rid].adv_rule)
6515 list_head = &sw->recp_list[rid].filt_rules;
/* _safe iteration: ice_rem_adv_rule() may unlink the entry. */
6516 list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6518 rinfo = list_itr->rule_info;
/* A VSI-list rule affects this VSI only if the VSI is a
 * member of the list's bitmap.
 */
6520 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6521 map_info = list_itr->vsi_list_info;
6525 if (!test_bit(vsi_handle, map_info->vsi_map))
6527 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6531 rinfo.sw_act.vsi_handle = vsi_handle;
6532 status = ice_rem_adv_rule(hw, list_itr->lkups,
6533 list_itr->lkups_cnt, &rinfo);
/* NOTE(review): excerpt carries stray line numbers and is missing some
 * interleaved lines (braces, returns, status handling).
 */
6542 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6543 * @hw: pointer to the hardware structure
6544 * @vsi_handle: driver VSI handle
6545 * @list_head: list for which filters need to be replayed
6547 * Replay the advanced rule for the given VSI.
6550 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6551 struct list_head *list_head)
6553 struct ice_rule_query_data added_entry = { 0 };
6554 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
/* Nothing to replay for an empty list. */
6557 if (list_empty(list_head))
6559 list_for_each_entry(adv_fltr, list_head, list_entry) {
6560 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6561 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only re-program rules that belong to the requested VSI. */
6563 if (vsi_handle != rinfo->sw_act.vsi_handle)
6565 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
/* NOTE(review): excerpt carries stray line numbers and is missing some
 * interleaved lines (braces, status handling, return).
 */
6574 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6575 * @hw: pointer to the hardware structure
6576 * @vsi_handle: driver VSI handle
6578 * Replays filters for requested VSI via vsi_handle.
6580 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6582 struct ice_switch_info *sw = hw->switch_info;
6586 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6587 struct list_head *head;
6589 head = &sw->recp_list[i].filt_replay_rules;
/* Legacy recipes and advanced recipes have distinct replay paths. */
6590 if (!sw->recp_list[i].adv_rule)
6591 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6593 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6601 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6602 * @hw: pointer to the HW struct
6604 * Deletes the filter replay rules.
6606 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6608 struct ice_switch_info *sw = hw->switch_info;
6614 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6615 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6616 struct list_head *l_head;
6618 l_head = &sw->recp_list[i].filt_replay_rules;
6619 if (!sw->recp_list[i].adv_rule)
6620 ice_rem_sw_rule_info(hw, l_head);
6622 ice_rem_adv_rule_info(hw, l_head);