1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
5 #include "ice_switch.h"
/* Byte offsets into the dummy Ethernet header (dummy_eth_header below) used
 * when a switch filter rule needs to patch fields of that header.
 */
#define ICE_ETH_DA_OFFSET 0 /* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET 12 /* EtherType (header without VLAN tag) */
#define ICE_ETH_VLAN_TCI_OFFSET 14 /* VLAN TCI (header with 802.1Q tag) */
#define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID 0x86DD /* EtherType value for IPv6 */
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14 * struct to configure any switch filter rules.
15 * {DA (6 bytes), SA(6 bytes),
16 * Ether type (2 bytes for header without VLAN tag) OR
17 * VLAN tag (4 bytes for header with VLAN tag) }
19 * Word on Hardcoded values
20 * byte 0 = 0x2: to identify it as locally administered DA MAC
21 * byte 6 = 0x2: to identify it as locally administered SA MAC
22 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes define ether type (0x8100)
24 * and remaining two bytes are placeholder for programming a given VLAN ID
25 * In case of Ether type filter it is treated as header without VLAN tag
 * and bytes 12 and 13 are used to program a given Ether type instead
28 #define DUMMY_ETH_HDR_LEN 16
29 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34 ICE_PKT_OUTER_IPV6 = BIT(0),
35 ICE_PKT_TUN_GTPC = BIT(1),
36 ICE_PKT_TUN_GTPU = BIT(2),
37 ICE_PKT_TUN_NVGRE = BIT(3),
38 ICE_PKT_TUN_UDP = BIT(4),
39 ICE_PKT_INNER_IPV6 = BIT(5),
40 ICE_PKT_INNER_TCP = BIT(6),
41 ICE_PKT_INNER_UDP = BIT(7),
42 ICE_PKT_GTP_NOPAY = BIT(8),
43 ICE_PKT_KMALLOC = BIT(9),
44 ICE_PKT_PPPOE = BIT(10),
47 struct ice_dummy_pkt_offsets {
48 enum ice_protocol_type type;
49 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
52 struct ice_dummy_pkt_profile {
53 const struct ice_dummy_pkt_offsets *offsets;
/* Declare the offsets table describing a dummy packet's layout for @type:
 * an array of { protocol type, byte offset } pairs, terminated by an entry
 * whose type is ICE_PROTOCOL_LAST.
 */
#define ICE_DECLARE_PKT_OFFSETS(type) \
	static const struct ice_dummy_pkt_offsets \
	ice_dummy_##type##_packet_offsets[]
/* Declare the raw byte template of the dummy packet matching @type's
 * offsets table above.
 */
#define ICE_DECLARE_PKT_TEMPLATE(type) \
	static const u8 ice_dummy_##type##_packet[]
67 #define ICE_PKT_PROFILE(type, m) { \
69 .pkt = ice_dummy_##type##_packet, \
70 .pkt_len = sizeof(ice_dummy_##type##_packet), \
71 .offsets = ice_dummy_##type##_packet_offsets, \
72 .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
75 ICE_DECLARE_PKT_OFFSETS(vlan) = {
76 { ICE_VLAN_OFOS, 12 },
79 ICE_DECLARE_PKT_TEMPLATE(vlan) = {
80 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
83 ICE_DECLARE_PKT_OFFSETS(qinq) = {
88 ICE_DECLARE_PKT_TEMPLATE(qinq) = {
89 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
90 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
93 ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
96 { ICE_IPV4_OFOS, 14 },
102 { ICE_PROTOCOL_LAST, 0 },
105 ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
106 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
107 0x00, 0x00, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
110 0x08, 0x00, /* ICE_ETYPE_OL 12 */
112 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
113 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x2F, 0x00, 0x00,
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
118 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
119 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_IL 54 */
127 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x06, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
134 0x00, 0x00, 0x00, 0x00,
135 0x00, 0x00, 0x00, 0x00,
136 0x50, 0x02, 0x20, 0x00,
137 0x00, 0x00, 0x00, 0x00
140 ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
142 { ICE_ETYPE_OL, 12 },
143 { ICE_IPV4_OFOS, 14 },
146 { ICE_ETYPE_IL, 54 },
148 { ICE_UDP_ILOS, 76 },
149 { ICE_PROTOCOL_LAST, 0 },
152 ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
153 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
154 0x00, 0x00, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
157 0x08, 0x00, /* ICE_ETYPE_OL 12 */
159 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
160 0x00, 0x00, 0x00, 0x00,
161 0x00, 0x2F, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
165 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
166 0x00, 0x00, 0x00, 0x00,
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_IL 54 */
174 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
181 0x00, 0x08, 0x00, 0x00,
184 ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
186 { ICE_ETYPE_OL, 12 },
187 { ICE_IPV4_OFOS, 14 },
191 { ICE_VXLAN_GPE, 42 },
193 { ICE_ETYPE_IL, 62 },
196 { ICE_PROTOCOL_LAST, 0 },
199 ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
200 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
201 0x00, 0x00, 0x00, 0x00,
202 0x00, 0x00, 0x00, 0x00,
204 0x08, 0x00, /* ICE_ETYPE_OL 12 */
206 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
207 0x00, 0x01, 0x00, 0x00,
208 0x40, 0x11, 0x00, 0x00,
209 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00,
212 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
213 0x00, 0x46, 0x00, 0x00,
215 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
216 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
222 0x08, 0x00, /* ICE_ETYPE_IL 62 */
224 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
225 0x00, 0x01, 0x00, 0x00,
226 0x40, 0x06, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
231 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x00, 0x00,
233 0x50, 0x02, 0x20, 0x00,
234 0x00, 0x00, 0x00, 0x00
237 ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
239 { ICE_ETYPE_OL, 12 },
240 { ICE_IPV4_OFOS, 14 },
244 { ICE_VXLAN_GPE, 42 },
246 { ICE_ETYPE_IL, 62 },
248 { ICE_UDP_ILOS, 84 },
249 { ICE_PROTOCOL_LAST, 0 },
252 ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
253 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
254 0x00, 0x00, 0x00, 0x00,
255 0x00, 0x00, 0x00, 0x00,
257 0x08, 0x00, /* ICE_ETYPE_OL 12 */
259 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
260 0x00, 0x01, 0x00, 0x00,
261 0x00, 0x11, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00,
265 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
266 0x00, 0x3a, 0x00, 0x00,
268 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
269 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
272 0x00, 0x00, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00,
275 0x08, 0x00, /* ICE_ETYPE_IL 62 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
284 0x00, 0x08, 0x00, 0x00,
287 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
289 { ICE_ETYPE_OL, 12 },
290 { ICE_IPV4_OFOS, 14 },
293 { ICE_ETYPE_IL, 54 },
296 { ICE_PROTOCOL_LAST, 0 },
299 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x2F, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
313 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
316 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
319 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
321 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
322 0x00, 0x08, 0x06, 0x40,
323 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x50, 0x02, 0x20, 0x00,
336 0x00, 0x00, 0x00, 0x00
339 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
341 { ICE_ETYPE_OL, 12 },
342 { ICE_IPV4_OFOS, 14 },
345 { ICE_ETYPE_IL, 54 },
347 { ICE_UDP_ILOS, 96 },
348 { ICE_PROTOCOL_LAST, 0 },
351 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
352 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
353 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, 0x00, 0x00,
356 0x08, 0x00, /* ICE_ETYPE_OL 12 */
358 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
359 0x00, 0x00, 0x00, 0x00,
360 0x00, 0x2F, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
364 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
365 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
371 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
373 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
374 0x00, 0x08, 0x11, 0x40,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
385 0x00, 0x08, 0x00, 0x00,
388 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
390 { ICE_ETYPE_OL, 12 },
391 { ICE_IPV4_OFOS, 14 },
395 { ICE_VXLAN_GPE, 42 },
397 { ICE_ETYPE_IL, 62 },
400 { ICE_PROTOCOL_LAST, 0 },
403 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x08, 0x00, /* ICE_ETYPE_OL 12 */
410 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
411 0x00, 0x01, 0x00, 0x00,
412 0x40, 0x11, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
417 0x00, 0x5a, 0x00, 0x00,
419 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
420 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
426 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
428 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
429 0x00, 0x08, 0x06, 0x40,
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442 0x50, 0x02, 0x20, 0x00,
443 0x00, 0x00, 0x00, 0x00
446 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
448 { ICE_ETYPE_OL, 12 },
449 { ICE_IPV4_OFOS, 14 },
453 { ICE_VXLAN_GPE, 42 },
455 { ICE_ETYPE_IL, 62 },
457 { ICE_UDP_ILOS, 104 },
458 { ICE_PROTOCOL_LAST, 0 },
461 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
462 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
463 0x00, 0x00, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00,
466 0x08, 0x00, /* ICE_ETYPE_OL 12 */
468 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
469 0x00, 0x01, 0x00, 0x00,
470 0x00, 0x11, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
475 0x00, 0x4e, 0x00, 0x00,
477 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
478 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
481 0x00, 0x00, 0x00, 0x00,
482 0x00, 0x00, 0x00, 0x00,
484 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
486 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
487 0x00, 0x08, 0x11, 0x40,
488 0x00, 0x00, 0x00, 0x00,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
498 0x00, 0x08, 0x00, 0x00,
501 /* offset info for MAC + IPv4 + UDP dummy packet */
502 ICE_DECLARE_PKT_OFFSETS(udp) = {
504 { ICE_ETYPE_OL, 12 },
505 { ICE_IPV4_OFOS, 14 },
506 { ICE_UDP_ILOS, 34 },
507 { ICE_PROTOCOL_LAST, 0 },
510 /* Dummy packet for MAC + IPv4 + UDP */
511 ICE_DECLARE_PKT_TEMPLATE(udp) = {
512 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
516 0x08, 0x00, /* ICE_ETYPE_OL 12 */
518 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
519 0x00, 0x01, 0x00, 0x00,
520 0x00, 0x11, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
525 0x00, 0x08, 0x00, 0x00,
527 0x00, 0x00, /* 2 bytes for 4 byte alignment */
530 /* offset info for MAC + IPv4 + TCP dummy packet */
531 ICE_DECLARE_PKT_OFFSETS(tcp) = {
533 { ICE_ETYPE_OL, 12 },
534 { ICE_IPV4_OFOS, 14 },
536 { ICE_PROTOCOL_LAST, 0 },
539 /* Dummy packet for MAC + IPv4 + TCP */
540 ICE_DECLARE_PKT_TEMPLATE(tcp) = {
541 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
545 0x08, 0x00, /* ICE_ETYPE_OL 12 */
547 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
548 0x00, 0x01, 0x00, 0x00,
549 0x00, 0x06, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
553 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
554 0x00, 0x00, 0x00, 0x00,
555 0x00, 0x00, 0x00, 0x00,
556 0x50, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
559 0x00, 0x00, /* 2 bytes for 4 byte alignment */
562 ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
564 { ICE_ETYPE_OL, 12 },
565 { ICE_IPV6_OFOS, 14 },
567 { ICE_PROTOCOL_LAST, 0 },
570 ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
571 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
572 0x00, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
575 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
578 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
579 0x00, 0x00, 0x00, 0x00,
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
588 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, 0x00, 0x00,
591 0x50, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
594 0x00, 0x00, /* 2 bytes for 4 byte alignment */
598 ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
600 { ICE_ETYPE_OL, 12 },
601 { ICE_IPV6_OFOS, 14 },
602 { ICE_UDP_ILOS, 54 },
603 { ICE_PROTOCOL_LAST, 0 },
606 /* IPv6 + UDP dummy packet */
607 ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
608 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
612 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
615 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x00, 0x00,
625 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
626 0x00, 0x10, 0x00, 0x00,
628 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
629 0x00, 0x00, 0x00, 0x00,
631 0x00, 0x00, /* 2 bytes for 4 byte alignment */
634 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
635 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
637 { ICE_IPV4_OFOS, 14 },
642 { ICE_PROTOCOL_LAST, 0 },
645 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
646 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
651 0x45, 0x00, 0x00, 0x58, /* IP 14 */
652 0x00, 0x00, 0x00, 0x00,
653 0x00, 0x11, 0x00, 0x00,
654 0x00, 0x00, 0x00, 0x00,
655 0x00, 0x00, 0x00, 0x00,
657 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
658 0x00, 0x44, 0x00, 0x00,
660 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
661 0x00, 0x00, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x85,
664 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
665 0x00, 0x00, 0x00, 0x00,
667 0x45, 0x00, 0x00, 0x28, /* IP 62 */
668 0x00, 0x00, 0x00, 0x00,
669 0x00, 0x06, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00,
671 0x00, 0x00, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
674 0x00, 0x00, 0x00, 0x00,
675 0x00, 0x00, 0x00, 0x00,
676 0x50, 0x00, 0x00, 0x00,
677 0x00, 0x00, 0x00, 0x00,
679 0x00, 0x00, /* 2 bytes for 4 byte alignment */
682 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
683 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
685 { ICE_IPV4_OFOS, 14 },
689 { ICE_UDP_ILOS, 82 },
690 { ICE_PROTOCOL_LAST, 0 },
693 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
694 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
695 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00,
699 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
700 0x00, 0x00, 0x00, 0x00,
701 0x00, 0x11, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x00,
703 0x00, 0x00, 0x00, 0x00,
705 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
706 0x00, 0x38, 0x00, 0x00,
708 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x85,
712 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
713 0x00, 0x00, 0x00, 0x00,
715 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
716 0x00, 0x00, 0x00, 0x00,
717 0x00, 0x11, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
722 0x00, 0x08, 0x00, 0x00,
724 0x00, 0x00, /* 2 bytes for 4 byte alignment */
727 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
728 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
730 { ICE_IPV4_OFOS, 14 },
735 { ICE_PROTOCOL_LAST, 0 },
738 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
739 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
740 0x00, 0x00, 0x00, 0x00,
741 0x00, 0x00, 0x00, 0x00,
744 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x11, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
750 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
751 0x00, 0x58, 0x00, 0x00,
753 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
754 0x00, 0x00, 0x00, 0x00,
755 0x00, 0x00, 0x00, 0x85,
757 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
758 0x00, 0x00, 0x00, 0x00,
760 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
761 0x00, 0x14, 0x06, 0x00,
762 0x00, 0x00, 0x00, 0x00,
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x50, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, /* 2 bytes for 4 byte alignment */
780 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
782 { ICE_IPV4_OFOS, 14 },
786 { ICE_UDP_ILOS, 102 },
787 { ICE_PROTOCOL_LAST, 0 },
790 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
791 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
796 0x45, 0x00, 0x00, 0x60, /* IP 14 */
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x11, 0x00, 0x00,
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
802 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
803 0x00, 0x4c, 0x00, 0x00,
805 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
806 0x00, 0x00, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x85,
809 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
810 0x00, 0x00, 0x00, 0x00,
812 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
813 0x00, 0x08, 0x11, 0x00,
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
824 0x00, 0x08, 0x00, 0x00,
826 0x00, 0x00, /* 2 bytes for 4 byte alignment */
829 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
831 { ICE_IPV6_OFOS, 14 },
836 { ICE_PROTOCOL_LAST, 0 },
839 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
840 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
841 0x00, 0x00, 0x00, 0x00,
842 0x00, 0x00, 0x00, 0x00,
845 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
846 0x00, 0x44, 0x11, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
857 0x00, 0x44, 0x00, 0x00,
859 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
860 0x00, 0x00, 0x00, 0x00,
861 0x00, 0x00, 0x00, 0x85,
863 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
864 0x00, 0x00, 0x00, 0x00,
866 0x45, 0x00, 0x00, 0x28, /* IP 82 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x06, 0x00, 0x00,
869 0x00, 0x00, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x50, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, /* 2 bytes for 4 byte alignment */
881 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
883 { ICE_IPV6_OFOS, 14 },
887 { ICE_UDP_ILOS, 102 },
888 { ICE_PROTOCOL_LAST, 0 },
891 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
892 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
893 0x00, 0x00, 0x00, 0x00,
894 0x00, 0x00, 0x00, 0x00,
897 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
898 0x00, 0x38, 0x11, 0x00,
899 0x00, 0x00, 0x00, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
908 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
909 0x00, 0x38, 0x00, 0x00,
911 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
912 0x00, 0x00, 0x00, 0x00,
913 0x00, 0x00, 0x00, 0x85,
915 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
916 0x00, 0x00, 0x00, 0x00,
918 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
919 0x00, 0x00, 0x00, 0x00,
920 0x00, 0x11, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
925 0x00, 0x08, 0x00, 0x00,
927 0x00, 0x00, /* 2 bytes for 4 byte alignment */
930 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
932 { ICE_IPV6_OFOS, 14 },
937 { ICE_PROTOCOL_LAST, 0 },
940 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
941 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
942 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
946 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
947 0x00, 0x58, 0x11, 0x00,
948 0x00, 0x00, 0x00, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
953 0x00, 0x00, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
957 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
958 0x00, 0x58, 0x00, 0x00,
960 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
961 0x00, 0x00, 0x00, 0x00,
962 0x00, 0x00, 0x00, 0x85,
964 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
965 0x00, 0x00, 0x00, 0x00,
967 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
968 0x00, 0x14, 0x06, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
975 0x00, 0x00, 0x00, 0x00,
976 0x00, 0x00, 0x00, 0x00,
978 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
979 0x00, 0x00, 0x00, 0x00,
980 0x00, 0x00, 0x00, 0x00,
981 0x50, 0x00, 0x00, 0x00,
982 0x00, 0x00, 0x00, 0x00,
984 0x00, 0x00, /* 2 bytes for 4 byte alignment */
987 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
989 { ICE_IPV6_OFOS, 14 },
993 { ICE_UDP_ILOS, 122 },
994 { ICE_PROTOCOL_LAST, 0 },
997 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
998 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, 0x00, 0x00,
1003 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
1004 0x00, 0x4c, 0x11, 0x00,
1005 0x00, 0x00, 0x00, 0x00,
1006 0x00, 0x00, 0x00, 0x00,
1007 0x00, 0x00, 0x00, 0x00,
1008 0x00, 0x00, 0x00, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
1015 0x00, 0x4c, 0x00, 0x00,
1017 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
1018 0x00, 0x00, 0x00, 0x00,
1019 0x00, 0x00, 0x00, 0x85,
1021 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
1022 0x00, 0x00, 0x00, 0x00,
1024 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
1025 0x00, 0x08, 0x11, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1027 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00,
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, 0x00, 0x00,
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x00,
1035 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
1036 0x00, 0x08, 0x00, 0x00,
1038 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1041 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
1042 { ICE_MAC_OFOS, 0 },
1043 { ICE_IPV4_OFOS, 14 },
1045 { ICE_GTP_NO_PAY, 42 },
1046 { ICE_PROTOCOL_LAST, 0 },
1049 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
1050 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1051 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00,
1055 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
1056 0x00, 0x00, 0x40, 0x00,
1057 0x40, 0x11, 0x00, 0x00,
1058 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1061 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1062 0x00, 0x00, 0x00, 0x00,
1064 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1065 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x85,
1068 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1069 0x00, 0x00, 0x00, 0x00,
1071 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
1072 0x00, 0x00, 0x40, 0x00,
1073 0x40, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1079 ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
1080 { ICE_MAC_OFOS, 0 },
1081 { ICE_IPV6_OFOS, 14 },
1083 { ICE_GTP_NO_PAY, 62 },
1084 { ICE_PROTOCOL_LAST, 0 },
1087 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
1088 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1093 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1094 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1095 0x00, 0x00, 0x00, 0x00,
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1104 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1105 0x00, 0x00, 0x00, 0x00,
1107 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1108 0x00, 0x00, 0x00, 0x00,
1113 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
1114 { ICE_MAC_OFOS, 0 },
1115 { ICE_ETYPE_OL, 12 },
1117 { ICE_IPV4_OFOS, 22 },
1119 { ICE_PROTOCOL_LAST, 0 },
1122 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
1123 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1124 0x00, 0x00, 0x00, 0x00,
1125 0x00, 0x00, 0x00, 0x00,
1127 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1129 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1132 0x00, 0x21, /* PPP Link Layer 20 */
1134 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1135 0x00, 0x01, 0x00, 0x00,
1136 0x00, 0x06, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1140 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1141 0x00, 0x00, 0x00, 0x00,
1142 0x00, 0x00, 0x00, 0x00,
1143 0x50, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1149 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
1150 { ICE_MAC_OFOS, 0 },
1151 { ICE_ETYPE_OL, 12 },
1153 { ICE_IPV4_OFOS, 22 },
1154 { ICE_UDP_ILOS, 42 },
1155 { ICE_PROTOCOL_LAST, 0 },
1158 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
1159 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1160 0x00, 0x00, 0x00, 0x00,
1161 0x00, 0x00, 0x00, 0x00,
1163 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1165 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1168 0x00, 0x21, /* PPP Link Layer 20 */
1170 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1171 0x00, 0x01, 0x00, 0x00,
1172 0x00, 0x11, 0x00, 0x00,
1173 0x00, 0x00, 0x00, 0x00,
1174 0x00, 0x00, 0x00, 0x00,
1176 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1177 0x00, 0x08, 0x00, 0x00,
1179 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1182 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
1183 { ICE_MAC_OFOS, 0 },
1184 { ICE_ETYPE_OL, 12 },
1186 { ICE_IPV6_OFOS, 22 },
1188 { ICE_PROTOCOL_LAST, 0 },
1191 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
1192 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1193 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00,
1196 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1198 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1201 0x00, 0x57, /* PPP Link Layer 20 */
1203 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1204 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1205 0x00, 0x00, 0x00, 0x00,
1206 0x00, 0x00, 0x00, 0x00,
1207 0x00, 0x00, 0x00, 0x00,
1208 0x00, 0x00, 0x00, 0x00,
1209 0x00, 0x00, 0x00, 0x00,
1210 0x00, 0x00, 0x00, 0x00,
1211 0x00, 0x00, 0x00, 0x00,
1212 0x00, 0x00, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
1215 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00,
1217 0x50, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00,
1220 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1223 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
1224 { ICE_MAC_OFOS, 0 },
1225 { ICE_ETYPE_OL, 12 },
1227 { ICE_IPV6_OFOS, 22 },
1228 { ICE_UDP_ILOS, 62 },
1229 { ICE_PROTOCOL_LAST, 0 },
1232 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
1233 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1234 0x00, 0x00, 0x00, 0x00,
1235 0x00, 0x00, 0x00, 0x00,
1237 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1239 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1242 0x00, 0x57, /* PPP Link Layer 20 */
1244 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1245 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1246 0x00, 0x00, 0x00, 0x00,
1247 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x00, 0x00,
1249 0x00, 0x00, 0x00, 0x00,
1250 0x00, 0x00, 0x00, 0x00,
1251 0x00, 0x00, 0x00, 0x00,
1252 0x00, 0x00, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1256 0x00, 0x08, 0x00, 0x00,
1258 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1261 static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
1262 ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
1264 ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
1265 ICE_PKT_OUTER_IPV6 |
1266 ICE_PKT_INNER_IPV6 |
1268 ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
1269 ICE_PKT_OUTER_IPV6 |
1270 ICE_PKT_INNER_IPV6),
1271 ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
1272 ICE_PKT_OUTER_IPV6 |
1274 ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
1275 ICE_PKT_OUTER_IPV6),
1276 ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
1277 ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
1278 ICE_PKT_INNER_IPV6 |
1280 ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
1281 ICE_PKT_INNER_IPV6),
1282 ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
1284 ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
1285 ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
1286 ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
1287 ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
1289 ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
1290 ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
1291 ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
1292 ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
1294 ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
1295 ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
1296 ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
1297 ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
1298 ICE_PKT_INNER_IPV6 |
1300 ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
1301 ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
1302 ICE_PKT_INNER_IPV6),
1303 ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
1304 ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
1305 ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
1306 ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
1307 ICE_PKT_PROFILE(tcp, 0),
/* Size helpers for variable-length switch rule AQ structures; all are built
 * on struct_size(), the kernel's overflow-aware sizing helper for structs
 * ending in a flexible array member.
 */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
/* rule carrying the DUMMY_ETH_HDR_LEN-byte dummy Ethernet header */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
/* rule carrying no packet header data at all */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
/* large-action rule with (n) entries in its 'act' flexible array */
#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
/* VSI list rule with (n) entries in its 'vsi' flexible array */
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
1318 /* this is a recipe to profile association bitmap */
1319 static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1320 ICE_MAX_NUM_PROFILES);
1322 /* this is a profile to recipe association bitmap */
1323 static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1324 ICE_MAX_NUM_RECIPES);
1327 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1328 * @hw: pointer to the HW struct
1330 * Allocate memory for the entire recipe table and initialize the structures/
1331 * entries corresponding to basic recipes.
/* NOTE(review): interior lines (locals, allocation-failure check, return)
 * are elided in this extract.
 */
1333 int ice_init_def_sw_recp(struct ice_hw *hw)
1335 struct ice_sw_recipe *recps;
1338 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1339 sizeof(*recps), GFP_KERNEL);
/* Each entry starts as its own root recipe with empty filter/replay/group
 * lists and a per-recipe filter-rule lock.
 */
1343 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1344 recps[i].root_rid = i;
1345 INIT_LIST_HEAD(&recps[i].filt_rules);
1346 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1347 INIT_LIST_HEAD(&recps[i].rg_list);
1348 mutex_init(&recps[i].filt_rule_lock);
1351 hw->switch_info->recp_list = recps;
1357 * ice_aq_get_sw_cfg - get switch configuration
1358 * @hw: pointer to the hardware structure
1359 * @buf: pointer to the result buffer
1360 * @buf_size: length of the buffer available for response
1361 * @req_desc: pointer to requested descriptor
1362 * @num_elems: pointer to number of elements
1363 * @cd: pointer to command details structure or NULL
1365 * Get switch configuration (0x0200) to be placed in buf.
1366 * This admin command returns information such as initial VSI/port number
1367 * and switch ID it belongs to.
1369 * NOTE: *req_desc is both an input/output parameter.
1370 * The caller of this function first calls this function with *request_desc set
1371 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1372 * configuration information has been returned; if non-zero (meaning not all
1373 * the information was returned), the caller should call this function again
1374 * with *req_desc set to the previous value returned by f/w to get the
1375 * next block of switch configuration information.
1377 * *num_elems is output only parameter. This reflects the number of elements
1378 * in response buffer. The caller of this function to use *num_elems while
1379 * parsing the response buffer.
1382 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1383 u16 buf_size, u16 *req_desc, u16 *num_elems,
1384 struct ice_sq_cd *cd)
1386 struct ice_aqc_get_sw_cfg *cmd;
1387 struct ice_aq_desc desc;
1390 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1391 cmd = &desc.params.get_sw_conf;
/* Send the continuation cookie to FW in little-endian form */
1392 cmd->element = cpu_to_le16(*req_desc)
1394 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW writes the next-descriptor cookie and element count back into the
 * command descriptor; propagate both to the caller.
 */
1396 *req_desc = le16_to_cpu(cmd->element);
1397 *num_elems = le16_to_cpu(cmd->num_elems);
1405 * @hw: pointer to the HW struct
1406 * @vsi_ctx: pointer to a VSI context struct
1407 * @cd: pointer to command details structure or NULL
1409 * Add a VSI context to the hardware (0x0210)
1412 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1413 struct ice_sq_cd *cd)
1415 struct ice_aqc_add_update_free_vsi_resp *res;
1416 struct ice_aqc_add_get_update_free_vsi *cmd;
1417 struct ice_aq_desc desc;
1420 cmd = &desc.params.vsi_cmd;
1421 res = &desc.params.add_update_free_vsi_res;
1423 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* When not allocating from the FW pool, tell FW which VSI number to use */
1425 if (!vsi_ctx->alloc_from_pool)
1426 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
1427 ICE_AQ_VSI_IS_VALID);
1428 cmd->vf_id = vsi_ctx->vf_num;
1430 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
/* RD flag: this command carries a read buffer (the VSI info section) */
1432 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1434 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1435 sizeof(vsi_ctx->info), cd);
/* On success FW returns the assigned VSI number and pool accounting */
1438 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1439 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
1440 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
1448 * @hw: pointer to the HW struct
1449 * @vsi_ctx: pointer to a VSI context struct
1450 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1451 * @cd: pointer to command details structure or NULL
1453 * Free VSI context info from hardware (0x0213)
1456 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1457 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1459 struct ice_aqc_add_update_free_vsi_resp *resp;
1460 struct ice_aqc_add_get_update_free_vsi *cmd;
1461 struct ice_aq_desc desc;
1464 cmd = &desc.params.vsi_cmd;
1465 resp = &desc.params.add_update_free_vsi_res;
1467 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1469 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the keep_vsi_alloc guard on this flag is elided in this
 * extract; only the flag assignment is visible.
 */
1471 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
1473 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Report updated used/free VSI pool counts back to the caller */
1475 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1476 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1484 * @hw: pointer to the HW struct
1485 * @vsi_ctx: pointer to a VSI context struct
1486 * @cd: pointer to command details structure or NULL
1488 * Update VSI context in the hardware (0x0211)
1491 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1492 struct ice_sq_cd *cd)
1494 struct ice_aqc_add_update_free_vsi_resp *resp;
1495 struct ice_aqc_add_get_update_free_vsi *cmd;
1496 struct ice_aq_desc desc;
1499 cmd = &desc.params.vsi_cmd;
1500 resp = &desc.params.add_update_free_vsi_res;
1502 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1504 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the VSI info section is supplied as a command buffer */
1506 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1508 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1509 sizeof(vsi_ctx->info), cd);
/* Refresh pool accounting from the FW response */
1512 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
1513 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1520 * ice_is_vsi_valid - check whether the VSI is valid or not
1521 * @hw: pointer to the HW struct
1522 * @vsi_handle: VSI handle
1524 * check whether the VSI is valid or not
/* True only when the handle is in range AND a context has been saved */
1526 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1528 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1532 * ice_get_hw_vsi_num - return the HW VSI number
1533 * @hw: pointer to the HW struct
1534 * @vsi_handle: VSI handle
1536 * return the HW VSI number
1537 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
/* Unchecked dereference by design: caller must validate the handle first */
1539 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1541 return hw->vsi_ctx[vsi_handle]->vsi_num;
1545 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1546 * @hw: pointer to the HW struct
1547 * @vsi_handle: VSI handle
1549 * return the VSI context entry for a given VSI handle
/* Range-checked lookup: NULL for out-of-range handles (and for handles
 * whose slot is empty, since the slot itself holds NULL).
 */
1551 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1553 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1557 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1558 * @hw: pointer to the HW struct
1559 * @vsi_handle: VSI handle
1560 * @vsi: VSI context pointer
1562 * save the VSI context entry for a given VSI handle
/* Stores the pointer only; ownership of @vsi passes to the table and is
 * released later by ice_clear_vsi_ctx().
 */
1565 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1567 hw->vsi_ctx[vsi_handle] = vsi;
1571 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1572 * @hw: pointer to the HW struct
1573 * @vsi_handle: VSI handle
1575 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1577 struct ice_vsi_ctx *vsi;
1580 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free per-TC LAN and RDMA queue context arrays, NULLing each pointer so
 * repeated clears are safe.
 */
1583 ice_for_each_traffic_class(i) {
1584 if (vsi->lan_q_ctx[i]) {
1585 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1586 vsi->lan_q_ctx[i] = NULL;
1588 if (vsi->rdma_q_ctx[i]) {
1589 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1590 vsi->rdma_q_ctx[i] = NULL;
1596 * ice_clear_vsi_ctx - clear the VSI context entry
1597 * @hw: pointer to the HW struct
1598 * @vsi_handle: VSI handle
1600 * clear the VSI context entry
1602 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1604 struct ice_vsi_ctx *vsi;
1606 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then the context struct itself, and clear
 * the table slot so ice_is_vsi_valid() reports the handle as free.
 */
1608 ice_clear_vsi_q_ctx(hw, vsi_handle);
1609 devm_kfree(ice_hw_to_dev(hw), vsi);
1610 hw->vsi_ctx[vsi_handle] = NULL;
1615 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1616 * @hw: pointer to the HW struct
/* Walk every possible handle; ice_clear_vsi_ctx() tolerates empty slots */
1618 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1622 for (i = 0; i < ICE_MAX_VSI; i++)
1623 ice_clear_vsi_ctx(hw, i);
1627 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1628 * @hw: pointer to the HW struct
1629 * @vsi_handle: unique VSI handle provided by drivers
1630 * @vsi_ctx: pointer to a VSI context struct
1631 * @cd: pointer to command details structure or NULL
1633 * Add a VSI context to the hardware also add it into the VSI handle list.
1634 * If this function gets called after reset for existing VSIs then update
1635 * with the new HW VSI number in the corresponding VSI handle list entry.
1638 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1639 struct ice_sq_cd *cd)
1641 struct ice_vsi_ctx *tmp_vsi_ctx;
1644 if (vsi_handle >= ICE_MAX_VSI)
1646 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1649 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1651 /* Create a new VSI context */
1652 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
1653 sizeof(*tmp_vsi_ctx), GFP_KERNEL);
/* Allocation failed: undo the FW-side add so HW and SW stay in sync */
1655 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1658 *tmp_vsi_ctx = *vsi_ctx;
1659 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* Handle already tracked (e.g. after reset): just refresh the HW number */
1661 /* update with new HW VSI num */
1662 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1669 * ice_free_vsi- free VSI context from hardware and VSI handle list
1670 * @hw: pointer to the HW struct
1671 * @vsi_handle: unique VSI handle
1672 * @vsi_ctx: pointer to a VSI context struct
1673 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1674 * @cd: pointer to command details structure or NULL
1676 * Free VSI context info from hardware as well as from VSI handle list
1679 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1680 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1684 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Resolve the handle to the HW VSI number before issuing the AQ free */
1686 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1687 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1689 ice_clear_vsi_ctx(hw, vsi_handle);
1695 * @hw: pointer to the HW struct
1696 * @vsi_handle: unique VSI handle
1697 * @vsi_ctx: pointer to a VSI context struct
1698 * @cd: pointer to command details structure or NULL
1700 * Update VSI context in the hardware
1703 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1704 struct ice_sq_cd *cd)
1706 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Map the SW handle to the current HW VSI number, then pass through */
1708 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1709 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1713 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1714 * @hw: pointer to HW struct
1715 * @vsi_handle: VSI SW index
1716 * @enable: boolean for enable/disable
1719 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1721 struct ice_vsi_ctx *ctx;
1723 ctx = ice_get_vsi_ctx(hw, vsi_handle);
/* Toggle the PE (protocol engine) filter-enable bit in the cached queue
 * option flags, then push the updated context to FW.
 */
1728 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1730 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1732 return ice_update_vsi(hw, vsi_handle, ctx, NULL);
1736 * ice_aq_alloc_free_vsi_list
1737 * @hw: pointer to the HW struct
1738 * @vsi_list_id: VSI list ID returned or used for lookup
1739 * @lkup_type: switch rule filter lookup type
1740 * @opc: switch rules population command type - pass in the command opcode
1742 * allocates or free a VSI list resource
1745 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1746 enum ice_sw_lkup_type lkup_type,
1747 enum ice_adminq_opc opc)
1749 struct ice_aqc_alloc_free_res_elem *sw_buf;
1750 struct ice_aqc_res_elem *vsi_ele;
1754 buf_len = struct_size(sw_buf, elem, 1);
1755 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
1758 sw_buf->num_elems = cpu_to_le16(1);
/* Most lookup types use a replication VSI list; VLAN lookups use a prune
 * list; anything else is rejected via the error exit path.
 */
1760 if (lkup_type == ICE_SW_LKUP_MAC ||
1761 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1762 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1763 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1764 lkup_type == ICE_SW_LKUP_PROMISC ||
1765 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1766 lkup_type == ICE_SW_LKUP_DFLT) {
1767 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1768 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1770 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1773 goto ice_aq_alloc_free_vsi_list_exit;
/* On free, the caller supplies the list ID to release */
1776 if (opc == ice_aqc_opc_free_res)
1777 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
1779 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1781 goto ice_aq_alloc_free_vsi_list_exit;
/* On alloc, FW returns the new list ID in the first element */
1783 if (opc == ice_aqc_opc_alloc_res) {
1784 vsi_ele = &sw_buf->elem[0];
1785 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
1788 ice_aq_alloc_free_vsi_list_exit:
1789 devm_kfree(ice_hw_to_dev(hw), sw_buf);
1794 * ice_aq_sw_rules - add/update/remove switch rules
1795 * @hw: pointer to the HW struct
1796 * @rule_list: pointer to switch rule population list
1797 * @rule_list_sz: total size of the rule list in bytes
1798 * @num_rules: number of switch rules in the rule_list
1799 * @opc: switch rules population command type - pass in the command opcode
1800 * @cd: pointer to command details structure or NULL
1802 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1805 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1806 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1808 struct ice_aq_desc desc;
/* Reject any opcode outside the three supported switch-rule commands */
1811 if (opc != ice_aqc_opc_add_sw_rules &&
1812 opc != ice_aqc_opc_update_sw_rules &&
1813 opc != ice_aqc_opc_remove_sw_rules)
1816 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1818 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1819 desc.params.sw_rules.num_rules_fltr_entry_index =
1820 cpu_to_le16(num_rules);
1821 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* Special-case ENOENT on update/remove (rule no longer present in FW) */
1822 if (opc != ice_aqc_opc_add_sw_rules &&
1823 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1830 * ice_aq_add_recipe - add switch recipe
1831 * @hw: pointer to the HW struct
1832 * @s_recipe_list: pointer to switch rule population list
1833 * @num_recipes: number of switch recipes in the list
1834 * @cd: pointer to command details structure or NULL
1839 ice_aq_add_recipe(struct ice_hw *hw,
1840 struct ice_aqc_recipe_data_elem *s_recipe_list,
1841 u16 num_recipes, struct ice_sq_cd *cd)
1843 struct ice_aqc_add_get_recipe *cmd;
1844 struct ice_aq_desc desc;
1847 cmd = &desc.params.add_get_recipe;
1848 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1850 cmd->num_sub_recipes = cpu_to_le16(num_recipes);
1851 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
/* Buffer carries num_recipes back-to-back recipe data elements */
1853 buf_size = num_recipes * sizeof(*s_recipe_list);
1855 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1859 * ice_aq_get_recipe - get switch recipe
1860 * @hw: pointer to the HW struct
1861 * @s_recipe_list: pointer to switch rule population list
1862 * @num_recipes: pointer to the number of recipes (input and output)
1863 * @recipe_root: root recipe number of recipe(s) to retrieve
1864 * @cd: pointer to command details structure or NULL
1868 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1869 * On output, *num_recipes will equal the number of entries returned in
1872 * The caller must supply enough space in s_recipe_list to hold all possible
1873 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1876 ice_aq_get_recipe(struct ice_hw *hw,
1877 struct ice_aqc_recipe_data_elem *s_recipe_list,
1878 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1880 struct ice_aqc_add_get_recipe *cmd;
1881 struct ice_aq_desc desc;
/* Enforce the full-size buffer contract documented above */
1885 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1888 cmd = &desc.params.add_get_recipe;
1889 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1891 cmd->return_index = cpu_to_le16(recipe_root);
1892 cmd->num_sub_recipes = 0;
1894 buf_size = *num_recipes * sizeof(*s_recipe_list);
1896 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* FW reports how many sub-recipes it actually returned */
1897 *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
1903 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
1904 * @hw: pointer to the HW struct
1905 * @params: parameters used to update the default recipe
1907 * This function only supports updating default recipes and it only supports
1908 * updating a single recipe based on the lkup_idx at a time.
1910 * This is done as a read-modify-write operation. First, get the current recipe
1911 * contents based on the recipe's ID. Then modify the field vector index and
1912 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
1913 * the pre-existing recipe with the modifications.
1916 ice_update_recipe_lkup_idx(struct ice_hw *hw,
1917 struct ice_update_recipe_lkup_idx_params *params)
1919 struct ice_aqc_recipe_data_elem *rcp_list;
1920 u16 num_recps = ICE_MAX_NUM_RECIPES;
1923 rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
1927 /* read current recipe list from firmware */
1928 rcp_list->recipe_indx = params->rid;
1929 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
1931 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
1932 params->rid, status);
1936 /* only modify existing recipe's lkup_idx and mask if valid, while
1937 * leaving all other fields the same, then update the recipe firmware
1939 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
1940 if (params->mask_valid)
1941 rcp_list->content.mask[params->lkup_idx] =
1942 cpu_to_le16(params->mask);
/* Optionally mark the lookup index as ignored in recipe matching */
1944 if (params->ignore_valid)
1945 rcp_list->content.lkup_indx[params->lkup_idx] |=
1946 ICE_AQ_RECIPE_LKUP_IGNORE;
/* Write-back: add-recipe AQ with the same recipe ID overwrites it */
1948 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
1950 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
1951 params->rid, params->lkup_idx, params->fv_idx,
1952 params->mask, params->mask_valid ? "true" : "false",
1961 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1962 * @hw: pointer to the HW struct
1963 * @profile_id: package profile ID to associate the recipe with
1964 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1965 * @cd: pointer to command details structure or NULL
1966 * Recipe to profile association (0x0291)
1969 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1970 struct ice_sq_cd *cd)
1972 struct ice_aqc_recipe_to_profile *cmd;
1973 struct ice_aq_desc desc;
1975 cmd = &desc.params.recipe_to_profile;
1976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1977 cmd->profile_id = cpu_to_le16(profile_id);
1978 /* Set the recipe ID bit in the bitmask to let the device know which
1979 * profile we are associating the recipe to
1981 memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
/* Direct command: association bitmap travels inside the descriptor */
1983 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1987 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1988 * @hw: pointer to the HW struct
1989 * @profile_id: package profile ID to associate the recipe with
1990 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1991 * @cd: pointer to command details structure or NULL
1992 * Associate profile ID with given recipe (0x0293)
1995 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1996 struct ice_sq_cd *cd)
1998 struct ice_aqc_recipe_to_profile *cmd;
1999 struct ice_aq_desc desc;
2002 cmd = &desc.params.recipe_to_profile;
2003 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2004 cmd->profile_id = cpu_to_le16(profile_id);
2006 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* FW fills recipe_assoc in the descriptor; copy it out to the caller */
2008 memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
2014 * ice_alloc_recipe - add recipe resource
2015 * @hw: pointer to the hardware structure
2016 * @rid: recipe ID returned as response to AQ call
2018 static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2020 struct ice_aqc_alloc_free_res_elem *sw_buf;
2024 buf_len = struct_size(sw_buf, elem, 1);
2025 sw_buf = kzalloc(buf_len, GFP_KERNEL);
2029 sw_buf->num_elems = cpu_to_le16(1);
/* Request one shared recipe resource from the FW resource pool */
2030 sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
2031 ICE_AQC_RES_TYPE_S) |
2032 ICE_AQC_RES_TYPE_FLAG_SHARED);
2033 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2034 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated recipe ID in the first element */
2036 *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
2043 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2044 * @hw: pointer to hardware structure
2046 * This function is used to populate recipe_to_profile matrix where index to
2047 * this array is the recipe ID and the element is the mapping of which profiles
2048 * is this recipe mapped to.
2050 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2052 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES)
2055 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
/* Reset the cached row before querying FW; skip row on query failure */
2058 bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2059 bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2060 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2062 bitmap_copy(profile_to_recipe[i], r_bitmap,
2063 ICE_MAX_NUM_RECIPES);
/* Mirror the association into the inverse (recipe -> profile) table */
2064 for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2065 set_bit(i, recipe_to_profile[j]);
2070 * ice_collect_result_idx - copy result index values
2071 * @buf: buffer that contains the result index
2072 * @recp: the recipe struct to copy data into
2075 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2076 struct ice_sw_recipe *recp)
/* Record the result index (with its enable bit stripped) only when the
 * recipe element marks it as enabled.
 */
2078 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2079 set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2084 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2085 * @hw: pointer to hardware structure
2086 * @recps: struct that we need to populate
2087 * @rid: recipe ID that we are populating
2088 * @refresh_required: true if we should get recipe to profile mapping from FW
2090 * This function is used to populate all the necessary entries into our
2091 * bookkeeping so that we have a current list of all the recipes that are
2092 * programmed in the firmware.
/* NOTE(review): several interior lines (error paths, some locals, closing
 * braces) are elided in this extract; comments describe visible logic only.
 */
2095 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2096 bool *refresh_required)
2098 DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
2099 struct ice_aqc_recipe_data_elem *tmp;
2100 u16 num_recps = ICE_MAX_NUM_RECIPES;
2101 struct ice_prot_lkup_ext *lkup_exts;
2106 bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
2108 /* we need a buffer big enough to accommodate all the recipes */
2109 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
2113 tmp[0].recipe_indx = rid;
2114 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2115 /* non-zero status meaning recipe doesn't exist */
2119 /* Get recipe to profile map so that we can get the fv from lkups that
2120 * we read for a recipe from FW. Since we want to minimize the number of
2121 * times we make this FW call, just make one call and cache the copy
2122 * until a new recipe is added. This operation is only required the
2123 * first time to get the changes from FW. Then to search existing
2124 * entries we don't need to update the cache again until another recipe
2127 if (*refresh_required) {
2128 ice_get_recp_to_prof_map(hw);
2129 *refresh_required = false;
2132 /* Start populating all the entries for recps[rid] based on lkups from
2133 * firmware. Note that we are only creating the root recipe in our
2136 lkup_exts = &recps[rid].lkup_exts;
2138 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2139 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2140 struct ice_recp_grp_entry *rg_entry;
2141 u8 i, prof, idx, prot = 0;
2145 rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
2152 idx = root_bufs.recipe_indx;
2153 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2155 /* Mark all result indices in this chain */
2156 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2157 set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2160 /* get the first profile that is associated with rid */
2161 prof = find_first_bit(recipe_to_profile[idx],
2162 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped; entries 1..N hold the FV indices */
2163 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2164 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2166 rg_entry->fv_idx[i] = lkup_indx;
2167 rg_entry->fv_mask[i] =
2168 le16_to_cpu(root_bufs.content.mask[i + 1]);
2170 /* If the recipe is a chained recipe then all its
2171 * child recipe's result will have a result index.
2172 * To fill fv_words we should not use those result
2173 * index, we only need the protocol ids and offsets.
2174 * We will skip all the fv_idx which stores result
2175 * index in them. We also need to skip any fv_idx which
2176 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2177 * valid offset value.
2179 if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
2180 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2181 rg_entry->fv_idx[i] == 0)
/* Translate the FV index into a protocol ID + offset pair */
2184 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2185 rg_entry->fv_idx[i], &prot, &off);
2186 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2187 lkup_exts->fv_words[fv_word_idx].off = off;
2188 lkup_exts->field_mask[fv_word_idx] =
2189 rg_entry->fv_mask[i];
2192 /* populate rg_list with the data from the child entry of this
2195 list_add(&rg_entry->l_entry, &recps[rid].rg_list);
2197 /* Propagate some data to the recipe database */
2198 recps[idx].is_root = !!is_root;
2199 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2200 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2201 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2202 recps[idx].chain_idx = root_bufs.content.result_indx &
2203 ~ICE_AQ_RECIPE_RESULT_EN;
2204 set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2206 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2212 /* Only do the following for root recipes entries */
2213 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2214 sizeof(recps[idx].r_bitmap));
2215 recps[idx].root_rid = root_bufs.content.rid &
2216 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2217 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2220 /* Complete initialization of the root recipe entry */
2221 lkup_exts->n_val_words = fv_word_idx;
2222 recps[rid].big_recp = (num_recps > 1);
2223 recps[rid].n_grp_count = (u8)num_recps;
/* Keep a copy of the raw FW recipe elements for later reference */
2224 recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
2225 recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
2227 if (!recps[rid].root_buf) {
2232 /* Copy result indexes */
2233 bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2234 recps[rid].recp_created = true;
2241 /* ice_init_port_info - Initialize port_info with switch configuration data
2242 * @pi: pointer to port_info
2243 * @vsi_port_num: VSI number or port number
2244 * @type: Type of switch element (port or VSI)
2245 * @swid: switch ID of the switch the element is attached to
2246 * @pf_vf_num: PF or VF number
2247 * @is_vf: true if the element is a VF, false otherwise
2250 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2251 u16 swid, u16 pf_vf_num, bool is_vf)
/* Only physical-port elements populate port_info; anything else is logged */
2254 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2255 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2257 pi->pf_vf_num = pf_vf_num;
2261 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2266 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2267 * @hw: pointer to the hardware structure
2269 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2271 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2277 rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
2283 /* Multiple calls to ice_aq_get_sw_cfg may be required
2284 * to get all the switch configuration information. The need
2285 * for additional calls is indicated by ice_aq_get_sw_cfg
2286 * writing a non-zero value in req_desc
2289 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2291 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2292 &req_desc, &num_elems, NULL);
/* Decode each returned element: VSI/port number, function number,
 * switch ID, and the VF/resource-type bits packed into the same words.
 */
2297 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2298 u16 pf_vf_num, swid, vsi_port_num;
2302 vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2303 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2305 pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2306 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2308 swid = le16_to_cpu(ele->swid);
2310 if (le16_to_cpu(ele->pf_vf_num) &
2311 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2314 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2315 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2317 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2318 /* FW VSI is not needed. Just continue. */
2322 ice_init_port_info(hw->port_info, vsi_port_num,
2323 res_type, swid, pf_vf_num, is_vf);
/* Loop until FW clears the continuation cookie or an error occurs */
2325 } while (req_desc && !status);
2327 devm_kfree(ice_hw_to_dev(hw), rbuf);
2332 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2333 * @hw: pointer to the hardware structure
2334 * @fi: filter info structure to fill/update
2336 * This helper function populates the lb_en and lan_en elements of the provided
2337 * ice_fltr_info struct using the switch's type and characteristics of the
2338 * switch rule being configured.
2340 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* TX-direction forwarding filters are the case that needs lb/lan tuning */
2344 if ((fi->flag & ICE_FLTR_TX) &&
2345 (fi->fltr_act == ICE_FWD_TO_VSI ||
2346 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2347 fi->fltr_act == ICE_FWD_TO_Q ||
2348 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2349 /* Setting LB for prune actions will result in replicated
2350 * packets to the internal switch that will be dropped.
2352 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2355 /* Set lan_en to TRUE if
2356 * 1. The switch is a VEB AND
2358 * 2.1 The lookup is a directional lookup like ethertype,
2359 * promiscuous, ethertype-MAC, promiscuous-VLAN
2360 * and default-port OR
2361 * 2.2 The lookup is VLAN, OR
2362 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2363 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2367 * The switch is a VEPA.
2369 * In all other cases, the LAN enable has to be set to false.
2372 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2373 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2374 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2375 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2376 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2377 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2378 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2379 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2380 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2381 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_sw_rule_lkup_rx_tx *s_rule,
		 enum ice_adminq_opc opc)
	/* Sentinel: any value above the 12-bit maximum means "no VLAN to
	 * program into the dummy header" (checked near the end below)
	 */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	/* A remove command only needs the rule index; skip building the
	 * action word and packet header
	 */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
		s_rule->hdr_len = 0;
	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->hdr_data;
	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);
	/* Encode the requested forwarding behavior into the single-action
	 * word ("act")
	 */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		/* VLAN lookups get the prune action instead (set in the
		 * lookup-type switch below), so skip plain VSI forwarding
		 */
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_QGRP:
		/* Queue-group region is encoded as log2 of the group size;
		 * a size of 0 maps to region 0
		 */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* Collect the lookup-type specific data (destination MAC, VLAN
	 * ID/TPID, ethertype) that gets patched into the dummy Ethernet
	 * header further down
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		/* Override the default 802.1Q TPID only when the caller
		 * supplied a valid one
		 */
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
	case ICE_SW_LKUP_ETHERTYPE:
		/* Ethertype is written directly into the dummy header */
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
	/* Rule direction (Rx vs Tx) selects the AQ rule type */
	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
	/* Recipe set depending on lookup type */
	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->src = cpu_to_le16(f_info->src);
	s_rule->act = cpu_to_le32(act);
	ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
	/* vlan_id is still the sentinel unless a lookup type above set a
	 * real (<= 12-bit) VLAN; only then program the TCI and TPID fields
	 */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
	struct ice_sw_rule_lkup_rx_tx *rx_tx;
	struct ice_sw_rule_lg_act *lg_act;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	const u16 num_lg_acts = 3;
	/* SW marker insertion is only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	/* The lookup rx_tx rule lives immediately after the large action in
	 * the same buffer
	 */
	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
	/* Fill in the first switch rule i.e. large action */
	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->index = cpu_to_le16(l_id);
	lg_act->size = cpu_to_le16(num_lg_acts);
	/* First action VSI forwarding or VSI list forwarding depending on how
	/* More than one VSI means the entry already forwards to a VSI list */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;
	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->act[0] = cpu_to_le32(act);
	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->act[1] = cpu_to_le32(act);
	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->act[2] = cpu_to_le32(act);
	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);
	/* Update the action to point to the large action ID */
	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
				  ICE_SINGLE_ACT_PTR_VAL_M));
	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
	/* Submit both rules (large action + updated lookup) in one AQ call */
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	/* Record the marker bookkeeping on the management entry */
	m_ent->lg_act_idx = l_id;
	m_ent->sw_marker_id = sw_marker;
	devm_kfree(ice_hw_to_dev(hw), lg_act);
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	/* devres-managed allocation; zeroed so vsi_map starts empty */
	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
	v_map->vsi_list_id = vsi_list_id;
	/* Mark every supplied VSI handle in the map's bitmap */
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);
	/* Track the new mapping on the switch-wide list */
	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
	struct ice_sw_rule_vsi_list *s_rule;
	/* Map lookup type to the AQ rule type: most lookups use a VSI list
	 * set/clear, while VLAN lookups use prune list set/clear
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_DFLT)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	/* Rule buffer is variable-sized: header plus num_vsi VSI entries */
	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	for (i = 0; i < num_vsi; i++) {
		/* Validate each handle before translating it */
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
		/* AQ call requires hw_vsi_id(s) */
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	s_rule->hdr.type = cpu_to_le16(rule_type);
	s_rule->number_vsi = cpu_to_le16(num_vsi);
	s_rule->index = cpu_to_le16(vsi_list_id);
	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
	devm_kfree(ice_hw_to_dev(hw), s_rule);
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
	/* Allocate a VSI list resource in FW; the new ID is returned via
	 * vsi_list_id
	 */
	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_sw_rule_lkup_rx_tx *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	/* Buffer for the AQ rule (header + dummy Ethernet header) */
	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
	/* Bookkeeping entry that will track this rule in SW */
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
		goto ice_create_pkt_fwd_rule_exit;
	fm_entry->fltr_info = f_entry->fltr_info;
	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);
	status = ice_aq_sw_rules(hw, s_rule,
				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	/* On AQ failure, drop the bookkeeping entry */
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	/* FW assigned the rule ID; record it in both caller's entry and the
	 * management entry
	 */
	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);
ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 * Call AQ command to update a previously created switch rule with a
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
	struct ice_sw_rule_lkup_rx_tx *s_rule;
	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
	/* Target the existing rule by its FW-assigned rule ID */
	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule,
				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
				 ice_aqc_opc_update_sw_rules, NULL);
	devm_kfree(ice_hw_to_dev(hw), s_rule);
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 * Updates unicast switch filter rules based on VEB/VEPA mode
int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	/* Only MAC lookup rules are affected by bridge-mode changes */
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;
		/* Update unicast Tx rules to reflect the selected
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			/* Re-issue the rule so HW picks up the new mode */
			status = ice_update_pkt_fwd_rule(hw, fi);
	mutex_unlock(rule_lock);
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 * Call AQ command to add or update previously created VSI list with new VSI.
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
	u16 vsi_list_id = 0;
	/* Queue-based forwarding actions cannot be shared via a VSI list */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
	/* Mixing a queue action with an existing VSI(-list) action is not
	 * supported either
	 */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  new_fltr->lkup_type);
		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* Switch the bookkeeping entry over to VSI-list forwarding */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		if (!m_entry->vsi_list_info)
		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			ice_add_marker_act(hw, m_entry,
					   m_entry->sw_marker_id,
					   m_entry->lg_act_idx);
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;
		if (!m_entry->vsi_list_info)
		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;
		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	/* Account for the subscribing VSI on the management entry */
		m_entry->vsi_count++;
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;
	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* Match on the full lookup data blob plus the Rx/Tx flag */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;
	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* Only consider single-VSI entries that have a list map */
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				/* Found: report the list ID to the caller */
				*vsi_list_id = map_info->vsi_list_id;
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 * Adds or updates the rule lists for a given recipe
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
	/* Translate the SW VSI handle to the HW VSI number the AQ needs */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Rx rules are sourced from the port; Tx rules from the VSI */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	/* No existing rule: create a fresh forwarding rule */
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	/* Rule exists: fold the new VSI into its VSI list bookkeeping */
	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 * The VSI list should be emptied before this function is called to remove the
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
	struct ice_sw_rule_vsi_list *s_rule;
	/* Zero-VSI rule: only the header is needed for a clear */
	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->index = cpu_to_le16(vsi_list_id);
	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);
	devm_kfree(ice_hw_to_dev(hw), s_rule);
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
	enum ice_sw_lkup_type lkup_type;
	/* Only meaningful for rules that currently forward to a VSI list */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Drop this VSI from the HW VSI list first */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	/* One VSI left (non-VLAN): collapse the rule back to direct
	 * FWD_TO_VSI so the list itself can be freed below
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		/* The sole remaining VSI is the first bit still set */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
		fm_list->fltr_info = tmp_fltr_info;
	/* VSI list no longer referenced (1 VSI for non-VLAN, 0 for VLAN):
	 * remove the HW list and the SW map
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
	/* Translate the SW VSI handle to the HW VSI number */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	/* Not forwarding to a VSI list: the rule itself can go away */
	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
	} else if (!list_elem->vsi_list_info) {
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		list_elem->vsi_list_info->ref_cnt--;
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
		/* Remove the lookup rule */
		struct ice_sw_rule_lkup_rx_tx *s_rule;
		/* Removal needs no packet header, hence the NO_HDR size */
		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);
		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
					 1, ice_aqc_opc_remove_sw_rules, NULL);
		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	/* Drop the SW bookkeeping entry */
	list_del(&list_elem->list_entry);
	devm_kfree(ice_hw_to_dev(hw), list_elem);
	mutex_unlock(rule_lock);
 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @mac: MAC address to be checked (for MAC filter)
 * @vsi_handle: check MAC filter for this VSI
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	if (!ice_is_vsi_valid(hw, vsi_handle))
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		/* Skip unprogrammed (all-zero) entries */
		if (is_zero_ether_addr(mac_addr))
		/* Only Tx VSI-sourced MAC rules forwarding to this VSI count */
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
		    f_info->fltr_act != ICE_FWD_TO_VSI ||
		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
		if (ether_addr_equal(mac, mac_addr)) {
			mutex_unlock(rule_lock);
	mutex_unlock(rule_lock);
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: check MAC filter for this VSI
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	/* VLAN IDs are only 12 bits wide */
	if (vlan_id > ICE_MAX_VLAN_ID)
	if (!ice_is_vsi_valid(hw, vsi_handle))
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;
		/* Skip entries carrying an out-of-range (unset) VLAN ID */
		if (entry_vlan_id > ICE_MAX_VLAN_ID)
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
	mutex_unlock(rule_lock);
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
	struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 num_unicast = 0;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* First pass: validate entries and decide per-entry handling.
	 * Exclusive unicast addresses are batched for one bulk AQ call;
	 * multicast (and shared unicast) go through the generic per-rule
	 * path immediately.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
			mutex_unlock(rule_lock);
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
			if (m_list_itr->status)
				return m_list_itr->status;
	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
		goto ice_add_mac_exit;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
		goto ice_add_mac_exit;
	/* Second pass: fill one rule per unicast address into the buffer */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
	/* Call AQ bulk switch rule update for all unicast addresses */
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
		/* Chunk size limited by the max AQ buffer length */
		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
			goto ice_add_mac_exit;
		r_iter = (typeof(s_rule))
			((u8 *)r_iter + (elem_sent * s_rule_size));
	/* Fill up rule ID based on the value returned from FW */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;
		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
				goto ice_add_mac_exit;
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
	mutex_unlock(rule_lock);
	devm_kfree(ice_hw_to_dev(hw), s_rule);
3599 * ice_add_vlan_internal - Add one VLAN based filter rule
3600 * @hw: pointer to the hardware structure
3601 * @f_entry: filter entry containing one VLAN information
/* Add one VLAN-based filter rule for the VSI described in @f_entry.
 * Under the VLAN recipe's rule lock, either creates a new forwarding rule
 * (converting the action to a VSI list) or folds the new VSI into an
 * existing rule's VSI list.
 * NOTE(review): excerpt — line numbering is non-contiguous; returns,
 * braces and several else-arms are elided from this view.
 */
3604 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3606 struct ice_switch_info *sw = hw->switch_info;
3607 struct ice_fltr_mgmt_list_entry *v_list_itr;
3608 struct ice_fltr_info *new_fltr, *cur_fltr;
3609 enum ice_sw_lkup_type lkup_type;
3610 u16 vsi_list_id = 0, vsi_handle;
3611 struct mutex *rule_lock; /* Lock to protect filter rule list */
/* Validate the caller's VSI handle before touching any rule state */
3614 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3617 f_entry->fltr_info.fwd_id.hw_vsi_id =
3618 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle)
3619 new_fltr = &f_entry->fltr_info;
3621 /* VLAN ID should only be 12 bits */
3622 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3625 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3628 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3629 lkup_type = new_fltr->lkup_type;
3630 vsi_handle = new_fltr->vsi_handle;
3631 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3632 mutex_lock(rule_lock);
3633 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* NOTE(review): the branch condition is elided here — presumably the
 * "no existing rule" path, where a fresh packet-forward rule is built.
 */
3635 struct ice_vsi_list_map_info *map_info = NULL;
3637 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3638 /* All VLAN pruning rules use a VSI list. Check if
3639 * there is already a VSI list containing VSI that we
3640 * want to add. If found, use the same vsi_list_id for
3641 * this new VLAN rule or else create a new list.
3643 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3647 status = ice_create_vsi_list_rule(hw,
3655 /* Convert the action to forwarding to a VSI list. */
3656 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3657 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3660 status = ice_create_pkt_fwd_rule(hw, f_entry);
3662 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3668 /* reuse VSI list for new rule and increment ref_cnt */
3670 v_list_itr->vsi_list_info = map_info;
3671 map_info->ref_cnt++;
3673 v_list_itr->vsi_list_info =
3674 ice_create_vsi_list_map(hw, &vsi_handle,
3678 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3679 /* Update existing VSI list to add new VSI ID only if it used
3682 cur_fltr = &v_list_itr->fltr_info;
3683 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3686 /* If VLAN rule exists and VSI list being used by this rule is
3687 * referenced by more than 1 VLAN rule. Then create a new VSI
3688 * list appending previous VSI with new VSI and update existing
3689 * VLAN rule to point to new VSI list ID
3691 struct ice_fltr_info tmp_fltr;
3692 u16 vsi_handle_arr[2];
3695 /* Current implementation only supports reusing VSI list with
3696 * one VSI count. We should never hit below condition
3698 if (v_list_itr->vsi_count > 1 &&
3699 v_list_itr->vsi_list_info->ref_cnt > 1) {
3700 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3706 find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3709 /* A rule already exists with the new VSI being added */
3710 if (cur_handle == vsi_handle) {
3715 vsi_handle_arr[0] = cur_handle;
3716 vsi_handle_arr[1] = vsi_handle;
3717 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3718 &vsi_list_id, lkup_type);
3722 tmp_fltr = v_list_itr->fltr_info;
3723 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3724 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3725 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3726 /* Update the previous switch rule to a new VSI list which
3727 * includes current VSI that is requested
3729 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3733 /* before overriding VSI list map info. decrement ref_cnt of
3736 v_list_itr->vsi_list_info->ref_cnt--;
3738 /* now update to newly created list */
3739 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3740 v_list_itr->vsi_list_info =
3741 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3743 v_list_itr->vsi_count++;
3747 mutex_unlock(rule_lock);
3752 * ice_add_vlan - Add VLAN based filter rule
3753 * @hw: pointer to the hardware structure
3754 * @v_list: list of VLAN entries and forwarding information
/* Walk @v_list and add each VLAN filter via ice_add_vlan_internal();
 * stops and returns the first non-zero per-entry status.
 * NOTE(review): excerpt — early validation, braces and the final return
 * are elided from this view.
 */
3756 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3758 struct ice_fltr_list_entry *v_list_itr;
3763 list_for_each_entry(v_list_itr, v_list, list_entry) {
/* Only VLAN lookup-type entries are accepted on this list */
3764 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
/* VLAN rules are programmed as Tx-direction filters */
3766 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3767 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3768 if (v_list_itr->status)
3769 return v_list_itr->status;
3775 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3776 * @hw: pointer to the hardware structure
3777 * @em_list: list of ether type MAC filter, MAC is optional
3779 * This function requires the caller to populate the entries in
3780 * the filter list with the necessary fields (including flags to
3781 * indicate Tx or Rx rules).
/* Add each ethertype / ethertype+MAC filter in @em_list through
 * ice_add_rule_internal(); per-entry status is stored on the entry and
 * the first failure aborts the walk.
 * NOTE(review): excerpt — some lines (returns/braces) are elided.
 */
3783 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3785 struct ice_fltr_list_entry *em_list_itr;
3787 if (!em_list || !hw)
3790 list_for_each_entry(em_list_itr, em_list, list_entry) {
3791 enum ice_sw_lkup_type l_type =
3792 em_list_itr->fltr_info.lkup_type;
/* Only the two ethertype-based lookup types are valid here */
3794 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3795 l_type != ICE_SW_LKUP_ETHERTYPE)
3798 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3800 if (em_list_itr->status)
3801 return em_list_itr->status;
3807 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3808 * @hw: pointer to the hardware structure
3809 * @em_list: list of ethertype or ethertype MAC entries
/* Remove each ethertype / ethertype+MAC filter in @em_list via
 * ice_remove_rule_internal(); mirrors ice_add_eth_mac() but uses the
 * _safe iterator since removal may unlink entries.
 * NOTE(review): excerpt — some lines (returns/braces) are elided.
 */
3811 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3813 struct ice_fltr_list_entry *em_list_itr, *tmp;
3815 if (!em_list || !hw)
3818 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3819 enum ice_sw_lkup_type l_type =
3820 em_list_itr->fltr_info.lkup_type;
3822 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3823 l_type != ICE_SW_LKUP_ETHERTYPE)
3826 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3828 if (em_list_itr->status)
3829 return em_list_itr->status;
3835 * ice_rem_sw_rule_info
3836 * @hw: pointer to the hardware structure
3837 * @rule_head: pointer to the switch list structure that we want to delete
/* Delete every bookkeeping entry on @rule_head and free it back to the
 * device-managed allocator. Pure SW cleanup — no AQ commands issued here.
 */
3840 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3842 if (!list_empty(rule_head)) {
3843 struct ice_fltr_mgmt_list_entry *entry;
3844 struct ice_fltr_mgmt_list_entry *tmp;
/* _safe variant: entries are unlinked while iterating */
3846 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3847 list_del(&entry->list_entry);
3848 devm_kfree(ice_hw_to_dev(hw), entry);
3854 * ice_rem_adv_rule_info
3855 * @hw: pointer to the hardware structure
3856 * @rule_head: pointer to the switch list structure that we want to delete
/* Delete every advanced-rule bookkeeping entry on @rule_head, freeing
 * both the entry's lookup array (->lkups) and the entry itself.
 */
3859 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3861 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3862 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3864 if (list_empty(rule_head))
3867 list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3868 list_del(&lst_itr->list_entry);
/* Free the lookup array first, then its owning entry */
3869 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3870 devm_kfree(ice_hw_to_dev(hw), lst_itr);
3875 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3876 * @pi: pointer to the port_info structure
3877 * @vsi_handle: VSI handle to set as default
3878 * @set: true to add the above mentioned switch rule, false to remove it
3879 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3881 * add filter rule to set/unset given VSI as default VSI for the switch
3882 * (represented by swid)
/* Build a default-VSI (ICE_SW_LKUP_DFLT) filter for @vsi_handle in the
 * given @direction and either add it (@set) or remove it.
 * NOTE(review): excerpt — the @set branch condition and return are elided.
 */
3885 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3888 struct ice_fltr_list_entry f_list_entry;
3889 struct ice_fltr_info f_info;
3890 struct ice_hw *hw = pi->hw;
3894 if (!ice_is_vsi_valid(hw, vsi_handle))
3897 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3899 memset(&f_info, 0, sizeof(f_info));
3901 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3902 f_info.flag = direction;
3903 f_info.fltr_act = ICE_FWD_TO_VSI;
3904 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3905 f_info.vsi_handle = vsi_handle;
/* Source depends on direction: Rx rules match on the physical port,
 * Tx rules match on the HW VSI number.
 */
3907 if (f_info.flag & ICE_FLTR_RX) {
3908 f_info.src = hw->port_info->lport;
3909 f_info.src_id = ICE_SRC_ID_LPORT;
3910 } else if (f_info.flag & ICE_FLTR_TX) {
3911 f_info.src_id = ICE_SRC_ID_VSI;
3912 f_info.src = hw_vsi_id;
3914 f_list_entry.fltr_info = f_info;
3917 status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
3920 status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
3927 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3928 * @fm_entry: filter entry to inspect
3929 * @vsi_handle: VSI handle to compare with filter info
/* True if @fm_entry targets @vsi_handle either directly (FWD_TO_VSI with
 * a matching handle) or indirectly (FWD_TO_VSI_LIST whose vsi_map bitmap
 * has the handle's bit set). The vsi_list_info NULL-check guards the
 * bitmap dereference.
 */
3932 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3934 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3935 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3936 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3937 fm_entry->vsi_list_info &&
3938 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
3942 * ice_check_if_dflt_vsi - check if VSI is default VSI
3943 * @pi: pointer to the port_info structure
3944 * @vsi_handle: vsi handle to check for in filter list
3945 * @rule_exists: indicates if there are any VSI's in the rule list
3947 * checks if the VSI is in a default VSI list, and also indicates
3948 * if the default VSI list is empty
/* Scan the default-VSI recipe's rule list under its lock; sets
 * *rule_exists when the list is non-empty and searches for a rule that
 * uses @vsi_handle.
 * NOTE(review): excerpt — the match action inside the loop and the
 * return statement are elided from this view.
 */
3951 ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
3954 struct ice_fltr_mgmt_list_entry *fm_entry;
3955 struct ice_sw_recipe *recp_list;
3956 struct list_head *rule_head;
3957 struct mutex *rule_lock; /* Lock to protect filter rule list */
3960 recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
3961 rule_lock = &recp_list->filt_rule_lock;
3962 rule_head = &recp_list->filt_rules;
3964 mutex_lock(rule_lock);
/* rule_exists is optional output — only written when non-NULL */
3966 if (rule_exists && !list_empty(rule_head))
3967 *rule_exists = true;
3969 list_for_each_entry(fm_entry, rule_head, list_entry) {
3970 if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
3976 mutex_unlock(rule_lock);
3982 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3983 * @hw: pointer to the hardware structure
3984 * @recp_id: lookup type for which the specified rule needs to be searched
3985 * @f_info: rule information
3987 * Helper function to search for a unicast rule entry - this is to be used
3988 * to remove unicast MAC filter that is not shared with other VSIs on the
3991 * Returns pointer to entry storing the rule if found
/* Linear search of the recipe's filter list for an entry whose lookup
 * data, HW VSI id and direction flag all match @f_info.
 * Caller must hold the recipe's filt_rule_lock (see callers).
 * NOTE(review): excerpt — the match return and trailing NULL return are
 * elided from this view.
 */
3993 static struct ice_fltr_mgmt_list_entry *
3994 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3995 struct ice_fltr_info *f_info)
3997 struct ice_switch_info *sw = hw->switch_info;
3998 struct ice_fltr_mgmt_list_entry *list_itr;
3999 struct list_head *list_head;
4001 list_head = &sw->recp_list[recp_id].filt_rules;
4002 list_for_each_entry(list_itr, list_head, list_entry) {
/* Full l_data comparison plus hw_vsi_id and flag must all agree */
4003 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4004 sizeof(f_info->l_data)) &&
4005 f_info->fwd_id.hw_vsi_id ==
4006 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4007 f_info->flag == list_itr->fltr_info.flag)
4014 * ice_remove_mac - remove a MAC address based filter rule
4015 * @hw: pointer to the hardware structure
4016 * @m_list: list of MAC addresses and forwarding information
4018 * This function removes either a MAC filter rule or a specific VSI from a
4019 * VSI list for a multicast MAC address.
4021 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
4022 * be aware that this call will only work if all the entries passed into m_list
4023 * were added previously. It will not attempt to do a partial remove of entries
/* Remove each MAC filter in @m_list. For non-shared unicast addresses,
 * first verify (under the MAC recipe lock) that the rule belongs to this
 * VSI so another VSI's unicast filter is never torn down.
 * NOTE(review): excerpt — error returns, braces and the final return are
 * elided from this view.
 */
4026 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
4028 struct ice_fltr_list_entry *list_itr, *tmp;
4029 struct mutex *rule_lock; /* Lock to protect filter rule list */
4034 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4035 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
4036 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4037 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4040 if (l_type != ICE_SW_LKUP_MAC)
4043 vsi_handle = list_itr->fltr_info.vsi_handle;
4044 if (!ice_is_vsi_valid(hw, vsi_handle))
4047 list_itr->fltr_info.fwd_id.hw_vsi_id =
4048 ice_get_hw_vsi_num(hw, vsi_handle);
4049 if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
4050 /* Don't remove the unicast address that belongs to
4051 * another VSI on the switch, since it is not being
4054 mutex_lock(rule_lock);
4055 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
4056 &list_itr->fltr_info)) {
4057 mutex_unlock(rule_lock);
4060 mutex_unlock(rule_lock);
4062 list_itr->status = ice_remove_rule_internal(hw,
4065 if (list_itr->status)
4066 return list_itr->status;
4072 * ice_remove_vlan - Remove VLAN based filter rule
4073 * @hw: pointer to the hardware structure
4074 * @v_list: list of VLAN entries and forwarding information
/* Remove each VLAN filter in @v_list via ice_remove_rule_internal();
 * first per-entry failure aborts the walk.
 * NOTE(review): excerpt — validation, braces and the final return are
 * elided from this view.
 */
4076 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
4078 struct ice_fltr_list_entry *v_list_itr, *tmp;
4083 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4084 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4086 if (l_type != ICE_SW_LKUP_VLAN)
4088 v_list_itr->status = ice_remove_rule_internal(hw,
4091 if (v_list_itr->status)
4092 return v_list_itr->status;
4098 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4099 * @hw: pointer to the hardware structure
4100 * @vsi_handle: VSI handle to remove filters from
4101 * @vsi_list_head: pointer to the list to add entry to
4102 * @fi: pointer to fltr_info of filter entry to copy & add
4104 * Helper function, used when creating a list of filters to remove from
4105 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4106 * original filter entry, with the exception of fltr_info.fltr_act and
4107 * fltr_info.fwd_id fields. These are set such that later logic can
4108 * extract which VSI to remove the fltr from, and pass on that information.
/* Append a COPY of @fi to @vsi_list_head, rewriting the action/forwarding
 * fields so later removal logic knows exactly which VSI to target.
 * The copy is devm-allocated; the caller owns and frees it.
 */
4111 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4112 struct list_head *vsi_list_head,
4113 struct ice_fltr_info *fi)
4115 struct ice_fltr_list_entry *tmp;
4117 /* this memory is freed up in the caller function
4118 * once filters for this VSI are removed
4120 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4124 tmp->fltr_info = *fi;
4126 /* Overwrite these fields to indicate which VSI to remove filter from,
4127 * so find and remove logic can extract the information from the
4128 * list entries. Note that original entries will still have proper
4131 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4132 tmp->fltr_info.vsi_handle = vsi_handle;
4133 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4135 list_add(&tmp->list_entry, vsi_list_head);
4141 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4142 * @hw: pointer to the hardware structure
4143 * @vsi_handle: VSI handle to remove filters from
4144 * @lkup_list_head: pointer to the list that has certain lookup type filters
4145 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4147 * Locates all filters in lkup_list_head that are used by the given VSI,
4148 * and adds COPIES of those entries to vsi_list_head (intended to be used
4149 * to remove the listed filters).
4150 * Note that this means all entries in vsi_list_head must be explicitly
4151 * deallocated by the caller when done with list.
/* For every filter on @lkup_list_head used by @vsi_handle, append a copy
 * to @vsi_list_head via ice_add_entry_to_vsi_fltr_list().
 * Caller must free the copied entries when done.
 * NOTE(review): excerpt — error handling and return are elided.
 */
4154 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4155 struct list_head *lkup_list_head,
4156 struct list_head *vsi_list_head)
4158 struct ice_fltr_mgmt_list_entry *fm_entry;
4161 /* check to make sure VSI ID is valid and within boundary */
4162 if (!ice_is_vsi_valid(hw, vsi_handle))
4165 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4166 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4169 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4171 &fm_entry->fltr_info);
4179 * ice_determine_promisc_mask
4180 * @fi: filter info to parse
4182 * Helper function to determine which ICE_PROMISC_ mask corresponds
4183 * to given filter into.
/* Translate a filter's MAC class (bcast/mcast/ucast) and direction into
 * the corresponding ICE_PROMISC_* bit(s).
 * NOTE(review): excerpt — the line setting is_tx_fltr = true and the
 * condition guarding the VLAN bit (presumably a vid check) are elided.
 */
4185 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4187 u16 vid = fi->l_data.mac_vlan.vlan_id;
4188 u8 *macaddr = fi->l_data.mac.mac_addr;
4189 bool is_tx_fltr = false;
4190 u8 promisc_mask = 0;
4192 if (fi->flag == ICE_FLTR_TX)
4195 if (is_broadcast_ether_addr(macaddr))
4196 promisc_mask |= is_tx_fltr ?
4197 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4198 else if (is_multicast_ether_addr(macaddr))
4199 promisc_mask |= is_tx_fltr ?
4200 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4201 else if (is_unicast_ether_addr(macaddr))
4202 promisc_mask |= is_tx_fltr ?
4203 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4205 promisc_mask |= is_tx_fltr ?
4206 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4208 return promisc_mask;
4212 * ice_remove_promisc - Remove promisc based filter rules
4213 * @hw: pointer to the hardware structure
4214 * @recp_id: recipe ID for which the rule needs to removed
4215 * @v_list: list of promisc entries
/* Remove each promiscuous filter in @v_list for recipe @recp_id;
 * first per-entry failure aborts the walk.
 * NOTE(review): excerpt — closing brace and final return are elided.
 */
4218 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4220 struct ice_fltr_list_entry *v_list_itr, *tmp;
4222 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4223 v_list_itr->status =
4224 ice_remove_rule_internal(hw, recp_id, v_list_itr);
4225 if (v_list_itr->status)
4226 return v_list_itr->status;
4232 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4233 * @hw: pointer to the hardware structure
4234 * @vsi_handle: VSI handle to clear mode
4235 * @promisc_mask: mask of promiscuous config bits to clear
4236 * @vid: VLAN ID to clear VLAN promiscuous
/* Collect (under the recipe lock) every promisc rule on this VSI fully
 * covered by @promisc_mask into a private remove list, then remove them
 * and free the copies.
 * NOTE(review): excerpt — several returns/braces/continue statements are
 * elided from this view.
 */
4239 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4242 struct ice_switch_info *sw = hw->switch_info;
4243 struct ice_fltr_list_entry *fm_entry, *tmp;
4244 struct list_head remove_list_head;
4245 struct ice_fltr_mgmt_list_entry *itr;
4246 struct list_head *rule_head;
4247 struct mutex *rule_lock; /* Lock to protect filter rule list */
4251 if (!ice_is_vsi_valid(hw, vsi_handle))
/* VLAN promisc bits select the PROMISC_VLAN recipe */
4254 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4255 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4257 recipe_id = ICE_SW_LKUP_PROMISC;
4259 rule_head = &sw->recp_list[recipe_id].filt_rules;
4260 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4262 INIT_LIST_HEAD(&remove_list_head);
4264 mutex_lock(rule_lock);
4265 list_for_each_entry(itr, rule_head, list_entry) {
4266 struct ice_fltr_info *fltr_info;
4267 u8 fltr_promisc_mask = 0;
4269 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4271 fltr_info = &itr->fltr_info;
/* VLAN-specific clear: only match rules for the requested vid */
4273 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4274 vid != fltr_info->l_data.mac_vlan.vlan_id)
4277 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4279 /* Skip if filter is not completely specified by given mask */
4280 if (fltr_promisc_mask & ~promisc_mask)
4283 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4287 mutex_unlock(rule_lock);
4288 goto free_fltr_list;
4291 mutex_unlock(rule_lock);
4293 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the copies made by ice_add_entry_to_vsi_fltr_list() */
4296 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4297 list_del(&fm_entry->list_entry);
4298 devm_kfree(ice_hw_to_dev(hw), fm_entry);
4305 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4306 * @hw: pointer to the hardware structure
4307 * @vsi_handle: VSI handle to configure
4308 * @promisc_mask: mask of promiscuous config bits
4309 * @vid: VLAN ID to set VLAN promiscuous
/* Install one filter rule per direction/packet-type bit set in
 * @promisc_mask: each loop pass extracts one bit pair, builds the
 * matching DA/direction filter and adds it via ice_add_rule_internal().
 * NOTE(review): excerpt — several assignments (is_tx_fltr, loop exit,
 * return) are elided from this view.
 */
4312 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4314 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4315 struct ice_fltr_list_entry f_list_entry;
4316 struct ice_fltr_info new_fltr;
4323 if (!ice_is_vsi_valid(hw, vsi_handle))
4325 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4327 memset(&new_fltr, 0, sizeof(new_fltr));
4329 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4330 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4331 new_fltr.l_data.mac_vlan.vlan_id = vid;
4332 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4334 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4335 recipe_id = ICE_SW_LKUP_PROMISC;
4338 /* Separate filters must be set for each direction/packet type
4339 * combination, so we will loop over the mask value, store the
4340 * individual type, and clear it out in the input mask as it
4343 while (promisc_mask) {
/* Consume exactly one packet-type bit per iteration; Tx variants
 * (presumably) also set a tx-direction flag on the elided lines.
 */
4349 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4350 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4351 pkt_type = UCAST_FLTR;
4352 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4353 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4354 pkt_type = UCAST_FLTR;
4356 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4357 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4358 pkt_type = MCAST_FLTR;
4359 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4360 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4361 pkt_type = MCAST_FLTR;
4363 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4364 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4365 pkt_type = BCAST_FLTR;
4366 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4367 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4368 pkt_type = BCAST_FLTR;
4372 /* Check for VLAN promiscuous flag */
4373 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4374 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4375 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4376 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4380 /* Set filter DA based on packet type */
4381 mac_addr = new_fltr.l_data.mac.mac_addr;
4382 if (pkt_type == BCAST_FLTR) {
4383 eth_broadcast_addr(mac_addr);
4384 } else if (pkt_type == MCAST_FLTR ||
4385 pkt_type == UCAST_FLTR) {
4386 /* Use the dummy ether header DA */
4387 ether_addr_copy(mac_addr, dummy_eth_header);
4388 if (pkt_type == MCAST_FLTR)
4389 mac_addr[0] |= 0x1; /* Set multicast bit */
4392 /* Need to reset this to zero for all iterations */
4395 new_fltr.flag |= ICE_FLTR_TX;
4396 new_fltr.src = hw_vsi_id;
4398 new_fltr.flag |= ICE_FLTR_RX;
4399 new_fltr.src = hw->port_info->lport;
4402 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4403 new_fltr.vsi_handle = vsi_handle;
4404 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4405 f_list_entry.fltr_info = new_fltr;
4407 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4409 goto set_promisc_exit;
4417 * ice_set_vlan_vsi_promisc
4418 * @hw: pointer to the hardware structure
4419 * @vsi_handle: VSI handle to configure
4420 * @promisc_mask: mask of promiscuous config bits
4421 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4423 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Copy the VSI's VLAN filters (under the VLAN recipe lock), then for each
 * VLAN either clear or set promiscuous mode depending on
 * @rm_vlan_promisc, and finally free the copied list entries.
 * NOTE(review): excerpt — error checks inside the loop and the return are
 * elided from this view.
 */
4426 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4427 bool rm_vlan_promisc)
4429 struct ice_switch_info *sw = hw->switch_info;
4430 struct ice_fltr_list_entry *list_itr, *tmp;
4431 struct list_head vsi_list_head;
4432 struct list_head *vlan_head;
4433 struct mutex *vlan_lock; /* Lock to protect filter rule list */
4437 INIT_LIST_HEAD(&vsi_list_head);
4438 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4439 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
/* Snapshot the VLAN list under lock; work on copies afterwards */
4440 mutex_lock(vlan_lock);
4441 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4443 mutex_unlock(vlan_lock);
4445 goto free_fltr_list;
4447 list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4448 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4449 if (rm_vlan_promisc)
4450 status = ice_clear_vsi_promisc(hw, vsi_handle,
4451 promisc_mask, vlan_id);
4453 status = ice_set_vsi_promisc(hw, vsi_handle,
4454 promisc_mask, vlan_id);
/* Free the snapshot copies regardless of outcome */
4460 list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4461 list_del(&list_itr->list_entry);
4462 devm_kfree(ice_hw_to_dev(hw), list_itr);
4468 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4469 * @hw: pointer to the hardware structure
4470 * @vsi_handle: VSI handle to remove filters from
4471 * @lkup: switch rule filter lookup type
/* Copy the VSI's filters for lookup type @lkup (under the recipe lock),
 * dispatch to the matching remove helper by lookup type, then free the
 * copied list entries.
 * NOTE(review): excerpt — the switch statement's opening, break
 * statements and closing braces are elided from this view.
 */
4474 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4475 enum ice_sw_lkup_type lkup)
4477 struct ice_switch_info *sw = hw->switch_info;
4478 struct ice_fltr_list_entry *fm_entry;
4479 struct list_head remove_list_head;
4480 struct list_head *rule_head;
4481 struct ice_fltr_list_entry *tmp;
4482 struct mutex *rule_lock; /* Lock to protect filter rule list */
4485 INIT_LIST_HEAD(&remove_list_head);
4486 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4487 rule_head = &sw->recp_list[lkup].filt_rules;
4488 mutex_lock(rule_lock);
4489 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4491 mutex_unlock(rule_lock);
4493 goto free_fltr_list;
/* Dispatch removal by lookup type */
4496 case ICE_SW_LKUP_MAC:
4497 ice_remove_mac(hw, &remove_list_head);
4499 case ICE_SW_LKUP_VLAN:
4500 ice_remove_vlan(hw, &remove_list_head);
4502 case ICE_SW_LKUP_PROMISC:
4503 case ICE_SW_LKUP_PROMISC_VLAN:
4504 ice_remove_promisc(hw, lkup, &remove_list_head);
4506 case ICE_SW_LKUP_MAC_VLAN:
4507 case ICE_SW_LKUP_ETHERTYPE:
4508 case ICE_SW_LKUP_ETHERTYPE_MAC:
4509 case ICE_SW_LKUP_DFLT:
4510 case ICE_SW_LKUP_LAST:
4512 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
/* Free the copies made by ice_add_to_vsi_fltr_list() */
4517 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4518 list_del(&fm_entry->list_entry);
4519 devm_kfree(ice_hw_to_dev(hw), fm_entry);
4524 * ice_remove_vsi_fltr - Remove all filters for a VSI
4525 * @hw: pointer to the hardware structure
4526 * @vsi_handle: VSI handle to remove filters from
/* Remove all filters for @vsi_handle by invoking the per-lookup-type
 * helper once for each supported lookup type.
 */
4528 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4530 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4531 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4532 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4533 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4534 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4535 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4536 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4537 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4541 * ice_alloc_res_cntr - allocating resource counter
4542 * @hw: pointer to the hardware structure
4543 * @type: type of resource
4544 * @alloc_shared: if set it is shared else dedicated
4545 * @num_items: number of entries requested for FD resource type
4546 * @counter_id: counter index returned by AQ call
/* Allocate a resource counter from FW via the alloc-resource AQ command
 * and return the assigned index through @counter_id.
 * NOTE(review): excerpt — allocation-failure check, status check, kfree
 * and return are elided from this view.
 */
4549 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4552 struct ice_aqc_alloc_free_res_elem *buf;
4556 /* Allocate resource */
4557 buf_len = struct_size(buf, elem, 1);
4558 buf = kzalloc(buf_len, GFP_KERNEL);
4562 buf->num_elems = cpu_to_le16(num_items);
/* Pack resource type into its field and OR in the shared flag */
4563 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4564 ICE_AQC_RES_TYPE_M) | alloc_shared);
4566 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4567 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated counter index in elem[0] */
4571 *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4579 * ice_free_res_cntr - free resource counter
4580 * @hw: pointer to the hardware structure
4581 * @type: type of resource
4582 * @alloc_shared: if set it is shared else dedicated
4583 * @num_items: number of entries to be freed for FD resource type
4584 * @counter_id: counter ID resource which needs to be freed
/* Release resource counter @counter_id back to FW via the free-resource
 * AQ command; logs a debug message on failure.
 * NOTE(review): excerpt — allocation-failure check, kfree and return are
 * elided from this view.
 */
4587 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4590 struct ice_aqc_alloc_free_res_elem *buf;
4595 buf_len = struct_size(buf, elem, 1);
4596 buf = kzalloc(buf_len, GFP_KERNEL);
4600 buf->num_elems = cpu_to_le16(num_items);
4601 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4602 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* Tell FW exactly which counter to free */
4603 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4605 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4606 ice_aqc_opc_free_res, NULL);
4608 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4614 /* This is mapping table entry that maps every word within a given protocol
4615 * structure to the real byte offset as per the specification of that
4617 * for example dst address is 3 words in ethertype header and corresponding
4618 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4619 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4620 * matching entry describing its field. This needs to be updated if new
4621 * structure is added to that union.
/* Per-protocol word-to-byte-offset map: for each protocol header type,
 * lists the byte offset of every 16-bit word that can be extracted
 * (see the block comment above this table).
 * NOTE(review): excerpt — the closing brace of the initializer is elided.
 */
4623 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4624 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4625 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4626 { ICE_ETYPE_OL, { 0 } },
4627 { ICE_ETYPE_IL, { 0 } },
4628 { ICE_VLAN_OFOS, { 2, 0 } },
4629 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4630 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4631 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4632 26, 28, 30, 32, 34, 36, 38 } },
4633 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4634 26, 28, 30, 32, 34, 36, 38 } },
4635 { ICE_TCP_IL, { 0, 2 } },
4636 { ICE_UDP_OF, { 0, 2 } },
4637 { ICE_UDP_ILOS, { 0, 2 } },
4638 { ICE_VXLAN, { 8, 10, 12, 14 } },
4639 { ICE_GENEVE, { 8, 10, 12, 14 } },
4640 { ICE_NVGRE, { 0, 2, 4, 6 } },
4641 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
4642 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
4643 { ICE_PPPOE, { 0, 2, 4, 6 } },
4644 { ICE_VLAN_EX, { 2, 0 } },
4645 { ICE_VLAN_IN, { 2, 0 } },
/* Maps each SW protocol type to its HW protocol id. Non-const on
 * purpose: ice_change_proto_id_to_dvm() patches the ICE_VLAN_OFOS entry
 * at runtime when double-VLAN mode is enabled.
 * NOTE(review): excerpt — the closing brace of the initializer is elided.
 */
4648 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4649 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4650 { ICE_MAC_IL, ICE_MAC_IL_HW },
4651 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4652 { ICE_ETYPE_IL, ICE_ETYPE_IL_HW },
4653 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4654 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4655 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4656 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4657 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4658 { ICE_TCP_IL, ICE_TCP_IL_HW },
4659 { ICE_UDP_OF, ICE_UDP_OF_HW },
4660 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4661 { ICE_VXLAN, ICE_UDP_OF_HW },
4662 { ICE_GENEVE, ICE_UDP_OF_HW },
4663 { ICE_NVGRE, ICE_GRE_OF_HW },
4664 { ICE_GTP, ICE_UDP_OF_HW },
4665 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
4666 { ICE_PPPOE, ICE_PPPOE_HW },
4667 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
4668 { ICE_VLAN_IN, ICE_VLAN_OL_HW },
4672 * ice_find_recp - find a recipe
4673 * @hw: pointer to the hardware structure
4674 * @lkup_exts: extension sequence to match
4675 * @tun_type: type of recipe tunnel
4677 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* Search the SW recipe list for a recipe whose extraction words match
 * @lkup_exts and whose tunnel type matches @tun_type; syncs missing
 * entries from FW on first use.
 * NOTE(review): excerpt — several lines (found-flag updates, mask
 * comparison using cr/de, braces) are elided from this view.
 */
4680 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
4681 enum ice_sw_tunnel_type tun_type)
4683 bool refresh_required = true;
4684 struct ice_sw_recipe *recp;
4687 /* Walk through existing recipes to find a match */
4688 recp = hw->switch_info->recp_list;
4689 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4690 /* If recipe was not created for this ID, in SW bookkeeping,
4691 * check if FW has an entry for this recipe. If the FW has an
4692 * entry update it in our SW bookkeeping and continue with the
4695 if (!recp[i].recp_created)
4696 if (ice_get_recp_frm_fw(hw,
4697 hw->switch_info->recp_list, i,
4701 /* Skip inverse action recipes */
4702 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4703 ICE_AQ_RECIPE_ACT_INV_ACT)
4706 /* if number of words we are looking for match */
4707 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4708 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
4709 struct ice_fv_word *be = lkup_exts->fv_words;
4710 u16 *cr = recp[i].lkup_exts.field_mask;
4711 u16 *de = lkup_exts->field_mask;
4715 /* ar, cr, and qr are related to the recipe words, while
4716 * be, de, and pe are related to the lookup words
4718 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
4719 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
4721 if (ar[qr].off == be[pe].off &&
4722 ar[qr].prot_id == be[pe].prot_id &&
4724 /* Found the "pe"th word in the
4729 /* After walking through all the words in the
4730 * "i"th recipe if "p"th word was not found then
4731 * this recipe is not what we are looking for.
4732 * So break out from this loop and try the next
4735 if (qr >= recp[i].lkup_exts.n_val_words) {
4740 /* If for "i"th recipe the found was never set to false
4741 * then it means we found our match
4742 * Also tun type of recipe needs to be checked
4744 if (found && recp[i].tun_type == tun_type)
4745 return i; /* Return the recipe ID */
/* No recipe matched */
4748 return ICE_MAX_NUM_RECIPES;
4752 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4754 * As protocol id for outer vlan is different in dvm and svm, if dvm is
4755 * supported protocol array record for outer vlan has to be modified to
4756 * reflect the value proper for DVM.
4758 void ice_change_proto_id_to_dvm(void)
4762 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4763 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4764 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4765 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4769 * ice_prot_type_to_id - get protocol ID from protocol type
4770 * @type: protocol type
4771 * @id: pointer to variable that will receive the ID
4773 * Returns true if found, false otherwise
4775 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4779 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4780 if (ice_prot_id_tbl[i].type == type) {
4781 *id = ice_prot_id_tbl[i].protocol_id;
4788 * ice_fill_valid_words - count valid words
4789 * @rule: advanced rule with lookup information
4790 * @lkup_exts: byte offset extractions of the words that are valid
4792 * calculate valid words in a lookup rule using mask value
4795 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4796 struct ice_prot_lkup_ext *lkup_exts)
4798 u8 j, word, prot_id, ret_val;
4800 if (!ice_prot_type_to_id(rule->type, &prot_id))
4803 word = lkup_exts->n_val_words;
4805 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4806 if (((u16 *)&rule->m_u)[j] &&
4807 rule->type < ARRAY_SIZE(ice_prot_ext)) {
4808 /* No more space to accommodate */
4809 if (word >= ICE_MAX_CHAIN_WORDS)
4811 lkup_exts->fv_words[word].off =
4812 ice_prot_ext[rule->type].offs[j];
4813 lkup_exts->fv_words[word].prot_id =
4814 ice_prot_id_tbl[rule->type].protocol_id;
4815 lkup_exts->field_mask[word] =
4816 be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4820 ret_val = word - lkup_exts->n_val_words;
4821 lkup_exts->n_val_words = word;
4827 * ice_create_first_fit_recp_def - Create a recipe grouping
4828 * @hw: pointer to the hardware structure
4829 * @lkup_exts: an array of protocol header extractions
4830 * @rg_list: pointer to a list that stores new recipe groups
4831 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4833 * Using first fit algorithm, take all the words that are still not done
4834 * and start grouping them in 4-word groups. Each group makes up one
4838 ice_create_first_fit_recp_def(struct ice_hw *hw,
4839 struct ice_prot_lkup_ext *lkup_exts,
4840 struct list_head *rg_list,
4843 struct ice_pref_recipe_group *grp = NULL;
4848 /* Walk through every word in the rule to check if it is not done. If so
4849 * then this word needs to be part of a new recipe.
4851 for (j = 0; j < lkup_exts->n_val_words; j++)
4852 if (!test_bit(j, lkup_exts->done)) {
4854 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4855 struct ice_recp_grp_entry *entry;
4857 entry = devm_kzalloc(ice_hw_to_dev(hw),
4862 list_add(&entry->l_entry, rg_list);
4863 grp = &entry->r_group;
4867 grp->pairs[grp->n_val_pairs].prot_id =
4868 lkup_exts->fv_words[j].prot_id;
4869 grp->pairs[grp->n_val_pairs].off =
4870 lkup_exts->fv_words[j].off;
4871 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4879 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4880 * @hw: pointer to the hardware structure
4881 * @fv_list: field vector with the extraction sequence information
4882 * @rg_list: recipe groupings with protocol-offset pairs
4884 * Helper function to fill in the field vector indices for protocol-offset
4885 * pairs. These indexes are then ultimately programmed into a recipe.
4888 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4889 struct list_head *rg_list)
4891 struct ice_sw_fv_list_entry *fv;
4892 struct ice_recp_grp_entry *rg;
4893 struct ice_fv_word *fv_ext;
4895 if (list_empty(fv_list))
4898 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4900 fv_ext = fv->fv_ptr->ew;
4902 list_for_each_entry(rg, rg_list, l_entry) {
4905 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4906 struct ice_fv_word *pr;
4911 pr = &rg->r_group.pairs[i];
4912 mask = rg->r_group.mask[i];
4914 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4915 if (fv_ext[j].prot_id == pr->prot_id &&
4916 fv_ext[j].off == pr->off) {
4919 /* Store index of field vector */
4921 rg->fv_mask[i] = mask;
4925 /* Protocol/offset could not be found, caller gave an
4937 * ice_find_free_recp_res_idx - find free result indexes for recipe
4938 * @hw: pointer to hardware structure
4939 * @profiles: bitmap of profiles that will be associated with the new recipe
4940 * @free_idx: pointer to variable to receive the free index bitmap
4942 * The algorithm used here is:
4943 * 1. When creating a new recipe, create a set P which contains all
4944 * Profiles that will be associated with our new recipe
4946 * 2. For each Profile p in set P:
4947 * a. Add all recipes associated with Profile p into set R
4948 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4949 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4950 * i. Or just assume they all have the same possible indexes:
4952 * i.e., PossibleIndexes = 0x0000F00000000000
4954 * 3. For each Recipe r in set R:
4955 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4956 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4958 * FreeIndexes will contain the bits indicating the indexes free for use,
4959 * then the code needs to update the recipe[r].used_result_idx_bits to
4960 * indicate which indexes were selected for use by this recipe.
4963 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4964 unsigned long *free_idx)
4966 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4967 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4968 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
4971 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4972 bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4974 bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4976 /* For each profile we are going to associate the recipe with, add the
4977 * recipes that are associated with that profile. This will give us
4978 * the set of recipes that our recipe may collide with. Also, determine
4979 * what possible result indexes are usable given this set of profiles.
4981 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4982 bitmap_or(recipes, recipes, profile_to_recipe[bit],
4983 ICE_MAX_NUM_RECIPES);
4984 bitmap_and(possible_idx, possible_idx,
4985 hw->switch_info->prof_res_bm[bit],
4989 /* For each recipe that our new recipe may collide with, determine
4990 * which indexes have been used.
4992 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4993 bitmap_or(used_idx, used_idx,
4994 hw->switch_info->recp_list[bit].res_idxs,
4997 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4999 /* return number of free indexes */
5000 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
5004 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5005 * @hw: pointer to hardware structure
5006 * @rm: recipe management list entry
5007 * @profiles: bitmap of profiles that will be associated.
5010 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5011 unsigned long *profiles)
5013 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
5014 struct ice_aqc_recipe_data_elem *tmp;
5015 struct ice_aqc_recipe_data_elem *buf;
5016 struct ice_recp_grp_entry *entry;
5023 /* When more than one recipe are required, another recipe is needed to
5024 * chain them together. Matching a tunnel metadata ID takes up one of
5025 * the match fields in the chaining recipe reducing the number of
5026 * chained recipes by one.
5028 /* check number of free result indices */
5029 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
5030 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5032 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5033 free_res_idx, rm->n_grp_count);
5035 if (rm->n_grp_count > 1) {
5036 if (rm->n_grp_count > free_res_idx)
5042 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
/* tmp holds the FW recipe image used as a template for new recipes; buf is
 * the AQ payload for the rm->n_grp_count recipes being created.
 */
5045 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
5049 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
5056 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5057 recipe_count = ICE_MAX_NUM_RECIPES;
5058 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5060 if (status || recipe_count == 0)
5063 /* Allocate the recipe resources, and configure them according to the
5064 * match fields from protocol headers and extracted field vectors.
5066 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5067 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5070 status = ice_alloc_recipe(hw, &entry->rid);
5074 /* Clear the result index of the located recipe, as this will be
5075 * updated, if needed, later in the recipe creation process.
5077 tmp[0].content.result_indx = 0;
5079 buf[recps] = tmp[0];
5080 buf[recps].recipe_indx = (u8)entry->rid;
5081 /* if the recipe is a non-root recipe RID should be programmed
5082 * as 0 for the rules to be applied correctly.
5084 buf[recps].content.rid = 0;
5085 memset(&buf[recps].content.lkup_indx, 0,
5086 sizeof(buf[recps].content.lkup_indx));
5088 /* All recipes use look-up index 0 to match switch ID. */
5089 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5090 buf[recps].content.mask[0] =
5091 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5092 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5095 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5096 buf[recps].content.lkup_indx[i] = 0x80;
5097 buf[recps].content.mask[i] = 0;
/* Overwrite the ignore defaults with this group's real field-vector
 * indices and masks (lkup_indx 0 stays reserved for switch ID).
 */
5100 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5101 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5102 buf[recps].content.mask[i + 1] =
5103 cpu_to_le16(entry->fv_mask[i]);
5106 if (rm->n_grp_count > 1) {
5107 /* Checks to see if there really is a valid result index
5110 if (chain_idx >= ICE_MAX_FV_WORDS) {
5111 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
5116 entry->chain_idx = chain_idx;
5117 buf[recps].content.result_indx =
5118 ICE_AQ_RECIPE_RESULT_EN |
5119 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5120 ICE_AQ_RECIPE_RESULT_DATA_M);
5121 clear_bit(chain_idx, result_idx_bm);
5122 chain_idx = find_first_bit(result_idx_bm,
5126 /* fill recipe dependencies */
5127 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
5128 ICE_MAX_NUM_RECIPES);
5129 set_bit(buf[recps].recipe_indx,
5130 (unsigned long *)buf[recps].recipe_bitmap);
5131 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single recipe: it is its own root. */
5135 if (rm->n_grp_count == 1) {
5136 rm->root_rid = buf[0].recipe_indx;
5137 set_bit(buf[0].recipe_indx, rm->r_bitmap);
5138 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5139 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5140 memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5141 sizeof(buf[0].recipe_bitmap));
5146 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5147 * the recipe which is getting created if specified
5148 * by user. Usually any advanced switch filter, which results
5149 * into new extraction sequence, ended up creating a new recipe
5150 * of type ROOT and usually recipes are associated with profiles
5151 * Switch rule referreing newly created recipe, needs to have
5152 * either/or 'fwd' or 'join' priority, otherwise switch rule
5153 * evaluation will not happen correctly. In other words, if
5154 * switch rule to be evaluated on priority basis, then recipe
5155 * needs to have priority, otherwise it will be evaluated last.
5157 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multiple recipes: build one extra root recipe that chains the result
 * words of the others together.
 */
5159 struct ice_recp_grp_entry *last_chain_entry;
5162 /* Allocate the last recipe that will chain the outcomes of the
5163 * other recipes together
5165 status = ice_alloc_recipe(hw, &rid);
5169 buf[recps].recipe_indx = (u8)rid;
5170 buf[recps].content.rid = (u8)rid;
5171 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5172 /* the new entry created should also be part of rg_list to
5173 * make sure we have complete recipe
5175 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5176 sizeof(*last_chain_entry),
5178 if (!last_chain_entry) {
5182 last_chain_entry->rid = rid;
5183 memset(&buf[recps].content.lkup_indx, 0,
5184 sizeof(buf[recps].content.lkup_indx));
5185 /* All recipes use look-up index 0 to match switch ID. */
5186 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5187 buf[recps].content.mask[0] =
5188 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5189 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5190 buf[recps].content.lkup_indx[i] =
5191 ICE_AQ_RECIPE_LKUP_IGNORE;
5192 buf[recps].content.mask[i] = 0;
5196 /* update r_bitmap with the recp that is used for chaining */
5197 set_bit(rid, rm->r_bitmap);
5198 /* this is the recipe that chains all the other recipes so it
5199 * should not have a chaining ID to indicate the same
5201 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5202 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5203 last_chain_entry->fv_idx[i] = entry->chain_idx;
5204 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5205 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
5206 set_bit(entry->rid, rm->r_bitmap);
5208 list_add(&last_chain_entry->l_entry, &rm->rg_list);
5209 if (sizeof(buf[recps].recipe_bitmap) >=
5210 sizeof(rm->r_bitmap)) {
5211 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5212 sizeof(buf[recps].recipe_bitmap));
5217 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5220 rm->root_rid = (u8)rid;
/* Program the assembled recipes into FW under the change lock */
5222 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5226 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5227 ice_release_change_lock(hw);
5231 /* Every recipe that just got created add it to the recipe
5234 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5235 struct ice_switch_info *sw = hw->switch_info;
5236 bool is_root, idx_found = false;
5237 struct ice_sw_recipe *recp;
5238 u16 idx, buf_idx = 0;
5240 /* find buffer index for copying some data */
5241 for (idx = 0; idx < rm->n_grp_count; idx++)
5242 if (buf[idx].recipe_indx == entry->rid) {
5252 recp = &sw->recp_list[entry->rid];
5253 is_root = (rm->root_rid == entry->rid);
5254 recp->is_root = is_root;
5256 recp->root_rid = entry->rid;
5257 recp->big_recp = (is_root && rm->n_grp_count > 1);
5259 memcpy(&recp->ext_words, entry->r_group.pairs,
5260 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5262 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5263 sizeof(recp->r_bitmap));
5265 /* Copy non-result fv index values and masks to recipe. This
5266 * call will also update the result recipe bitmask.
5268 ice_collect_result_idx(&buf[buf_idx], recp);
5270 /* for non-root recipes, also copy to the root, this allows
5271 * easier matching of a complete chained recipe
5274 ice_collect_result_idx(&buf[buf_idx],
5275 &sw->recp_list[rm->root_rid]);
5277 recp->n_ext_words = entry->r_group.n_val_pairs;
5278 recp->chain_idx = entry->chain_idx;
5279 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5280 recp->n_grp_count = rm->n_grp_count;
5281 recp->tun_type = rm->tun_type;
5282 recp->recp_created = true;
5291 devm_kfree(ice_hw_to_dev(hw), buf);
5296 * ice_create_recipe_group - creates recipe group
5297 * @hw: pointer to hardware structure
5298 * @rm: recipe management list entry
5299 * @lkup_exts: lookup elements
5302 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5303 struct ice_prot_lkup_ext *lkup_exts)
5308 rm->n_grp_count = 0;
5310 /* Create recipes for words that are marked not done by packing them
5313 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5314 &rm->rg_list, &recp_count);
5316 rm->n_grp_count += recp_count;
5317 rm->n_ext_words = lkup_exts->n_val_words;
5318 memcpy(&rm->ext_words, lkup_exts->fv_words,
5319 sizeof(rm->ext_words));
5320 memcpy(rm->word_masks, lkup_exts->field_mask,
5321 sizeof(rm->word_masks));
5328 * ice_tun_type_match_word - determine if tun type needs a match mask
5329 * @tun_type: tunnel type
5330 * @mask: mask to be used for the tunnel
5332 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
5335 case ICE_SW_TUN_GENEVE:
5336 case ICE_SW_TUN_VXLAN:
5337 case ICE_SW_TUN_NVGRE:
5338 case ICE_SW_TUN_GTPU:
5339 case ICE_SW_TUN_GTPC:
5340 *mask = ICE_TUN_FLAG_MASK;
5350 * ice_add_special_words - Add words that are not protocols, such as metadata
5351 * @rinfo: other information regarding the rule e.g. priority and action info
5352 * @lkup_exts: lookup word structure
5353 * @dvm_ena: is double VLAN mode enabled
5356 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5357 struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
5361 /* If this is a tunneled packet, then add recipe index to match the
5362 * tunnel bit in the packet metadata flags.
5364 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
5365 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5366 u8 word = lkup_exts->n_val_words++;
5368 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5369 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
5370 lkup_exts->field_mask[word] = mask;
5376 if (rinfo->vlan_type != 0 && dvm_ena) {
5377 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5378 u8 word = lkup_exts->n_val_words++;
5380 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5381 lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
5382 lkup_exts->field_mask[word] =
5383 ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
5392 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5393 * @hw: pointer to hardware structure
5394 * @rinfo: other information regarding the rule e.g. priority and action info
5395 * @bm: pointer to memory for returning the bitmap of field vectors
5398 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5401 enum ice_prof_type prof_type;
5403 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5405 switch (rinfo->tun_type) {
5407 prof_type = ICE_PROF_NON_TUN;
5409 case ICE_ALL_TUNNELS:
5410 prof_type = ICE_PROF_TUN_ALL;
5412 case ICE_SW_TUN_GENEVE:
5413 case ICE_SW_TUN_VXLAN:
5414 prof_type = ICE_PROF_TUN_UDP;
5416 case ICE_SW_TUN_NVGRE:
5417 prof_type = ICE_PROF_TUN_GRE;
5419 case ICE_SW_TUN_GTPU:
5420 prof_type = ICE_PROF_TUN_GTPU;
5422 case ICE_SW_TUN_GTPC:
5423 prof_type = ICE_PROF_TUN_GTPC;
5425 case ICE_SW_TUN_AND_NON_TUN:
5427 prof_type = ICE_PROF_ALL;
5431 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5435 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5436 * @hw: pointer to hardware structure
5437 * @lkups: lookup elements or match criteria for the advanced recipe, one
5438 * structure per protocol header
5439 * @lkups_cnt: number of protocols
5440 * @rinfo: other information regarding the rule e.g. priority and action info
5441 * @rid: return the recipe ID of the recipe created
5444 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5445 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5447 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5448 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5449 struct ice_prot_lkup_ext *lkup_exts;
5450 struct ice_recp_grp_entry *r_entry;
5451 struct ice_sw_fv_list_entry *fvit;
5452 struct ice_recp_grp_entry *r_tmp;
5453 struct ice_sw_fv_list_entry *tmp;
5454 struct ice_sw_recipe *rm;
5461 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5465 /* Determine the number of words to be matched and if it exceeds a
5466 * recipe's restrictions
5468 for (i = 0; i < lkups_cnt; i++) {
5471 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5473 goto err_free_lkup_exts;
5476 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5479 goto err_free_lkup_exts;
5483 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5486 goto err_free_lkup_exts;
5489 /* Get field vectors that contain fields extracted from all the protocol
5490 * headers being programmed.
5492 INIT_LIST_HEAD(&rm->fv_list);
5493 INIT_LIST_HEAD(&rm->rg_list);
5495 /* Get bitmap of field vectors (profiles) that are compatible with the
5496 * rule request; only these will be searched in the subsequent call to
5497 * ice_get_sw_fv_list.
5499 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5501 status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5505 /* Create any special protocol/offset pairs, such as looking at tunnel
5506 * bits by extracting metadata
5508 status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
5510 goto err_free_lkup_exts;
5512 /* Group match words into recipes using preferred recipe grouping
5515 status = ice_create_recipe_group(hw, rm, lkup_exts);
5519 /* set the recipe priority if specified */
5520 rm->priority = (u8)rinfo->priority;
5522 /* Find offsets from the field vector. Pick the first one for all the
5525 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5529 /* get bitmap of all profiles the recipe will be associated with */
5530 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5531 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5532 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5533 set_bit((u16)fvit->profile_id, profiles);
5536 /* Look for a recipe which matches our requested fv / mask list */
5537 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5538 if (*rid < ICE_MAX_NUM_RECIPES)
5539 /* Success if found a recipe that match the existing criteria */
5542 rm->tun_type = rinfo->tun_type;
5543 /* Recipe we need does not exist, add a recipe */
5544 status = ice_add_sw_recipe(hw, rm, profiles);
5548 /* Associate all the recipes created with all the profiles in the
5549 * common field vector.
5551 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5552 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5555 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5556 (u8 *)r_bitmap, NULL);
5560 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5561 ICE_MAX_NUM_RECIPES);
5562 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5566 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5569 ice_release_change_lock(hw);
5574 /* Update profile to recipe bitmap array */
5575 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5576 ICE_MAX_NUM_RECIPES);
5578 /* Update recipe to profile bitmap array */
5579 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5580 set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
/* Store the lookup extensions with the root recipe so future rules can be
 * matched against it by ice_find_recp().
 */
5583 *rid = rm->root_rid;
5584 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5585 sizeof(*lkup_exts));
/* Tear down the temporary recipe-group and field-vector lists */
5587 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5588 list_del(&r_entry->l_entry);
5589 devm_kfree(ice_hw_to_dev(hw), r_entry);
5592 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5593 list_del(&fvit->list_entry);
5594 devm_kfree(ice_hw_to_dev(hw), fvit);
5598 devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5609 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
5611 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
5612 * @num_vlan: number of VLAN tags
5614 static struct ice_dummy_pkt_profile *
5615 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
5618 struct ice_dummy_pkt_profile *profile;
5619 struct ice_dummy_pkt_offsets *offsets;
5620 u32 buf_len, off, etype_off, i;
5623 if (num_vlan < 1 || num_vlan > 2)
5624 return ERR_PTR(-EINVAL);
5626 off = num_vlan * VLAN_HLEN;
5628 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
5629 dummy_pkt->offsets_len;
5630 offsets = kzalloc(buf_len, GFP_KERNEL);
5632 return ERR_PTR(-ENOMEM);
5634 offsets[0] = dummy_pkt->offsets[0];
5635 if (num_vlan == 2) {
5636 offsets[1] = ice_dummy_qinq_packet_offsets[0];
5637 offsets[2] = ice_dummy_qinq_packet_offsets[1];
5638 } else if (num_vlan == 1) {
5639 offsets[1] = ice_dummy_vlan_packet_offsets[0];
5642 for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5643 offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
5644 offsets[i + num_vlan].offset =
5645 dummy_pkt->offsets[i].offset + off;
5647 offsets[i + num_vlan] = dummy_pkt->offsets[i];
5649 etype_off = dummy_pkt->offsets[1].offset;
5651 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
5653 pkt = kzalloc(buf_len, GFP_KERNEL);
5656 return ERR_PTR(-ENOMEM);
5659 memcpy(pkt, dummy_pkt->pkt, etype_off);
5660 memcpy(pkt + etype_off,
5661 num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
5663 memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
5664 dummy_pkt->pkt_len - etype_off);
5666 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
5670 return ERR_PTR(-ENOMEM);
5673 profile->offsets = offsets;
5675 profile->pkt_len = buf_len;
5676 profile->match |= ICE_PKT_KMALLOC;
5682 * ice_find_dummy_packet - find dummy packet
5684 * @lkups: lookup elements or match criteria for the advanced recipe, one
5685 * structure per protocol header
5686 * @lkups_cnt: number of protocols
5687 * @tun_type: tunnel type
5689 * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5691 static const struct ice_dummy_pkt_profile *
5692 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5693 enum ice_sw_tunnel_type tun_type)
5695 const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5696 u32 match = 0, vlan_count = 0;
5700 case ICE_SW_TUN_GTPC:
5701 match |= ICE_PKT_TUN_GTPC;
5703 case ICE_SW_TUN_GTPU:
5704 match |= ICE_PKT_TUN_GTPU;
5706 case ICE_SW_TUN_NVGRE:
5707 match |= ICE_PKT_TUN_NVGRE;
5709 case ICE_SW_TUN_GENEVE:
5710 case ICE_SW_TUN_VXLAN:
5711 match |= ICE_PKT_TUN_UDP;
5717 for (i = 0; i < lkups_cnt; i++) {
5718 if (lkups[i].type == ICE_UDP_ILOS)
5719 match |= ICE_PKT_INNER_UDP;
5720 else if (lkups[i].type == ICE_TCP_IL)
5721 match |= ICE_PKT_INNER_TCP;
5722 else if (lkups[i].type == ICE_IPV6_OFOS)
5723 match |= ICE_PKT_OUTER_IPV6;
5724 else if (lkups[i].type == ICE_VLAN_OFOS ||
5725 lkups[i].type == ICE_VLAN_EX)
5727 else if (lkups[i].type == ICE_VLAN_IN)
5729 else if (lkups[i].type == ICE_ETYPE_OL &&
5730 lkups[i].h_u.ethertype.ethtype_id ==
5731 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5732 lkups[i].m_u.ethertype.ethtype_id ==
5733 cpu_to_be16(0xFFFF))
5734 match |= ICE_PKT_OUTER_IPV6;
5735 else if (lkups[i].type == ICE_ETYPE_IL &&
5736 lkups[i].h_u.ethertype.ethtype_id ==
5737 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5738 lkups[i].m_u.ethertype.ethtype_id ==
5739 cpu_to_be16(0xFFFF))
5740 match |= ICE_PKT_INNER_IPV6;
5741 else if (lkups[i].type == ICE_IPV6_IL)
5742 match |= ICE_PKT_INNER_IPV6;
5743 else if (lkups[i].type == ICE_GTP_NO_PAY)
5744 match |= ICE_PKT_GTP_NOPAY;
5745 else if (lkups[i].type == ICE_PPPOE) {
5746 match |= ICE_PKT_PPPOE;
5747 if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5749 match |= ICE_PKT_OUTER_IPV6;
5753 while (ret->match && (match & ret->match) != ret->match)
5756 if (vlan_count != 0)
5757 ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5763 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5765 * @lkups: lookup elements or match criteria for the advanced recipe, one
5766 * structure per protocol header
5767 * @lkups_cnt: number of protocols
5768 * @s_rule: stores rule information from the match criteria
5769 * @profile: dummy packet profile (the template, its size and header offsets)
5772 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5773 struct ice_sw_rule_lkup_rx_tx *s_rule,
5774 const struct ice_dummy_pkt_profile *profile)
5779 /* Start with a packet with a pre-defined/dummy content. Then, fill
5780 * in the header values to be looked up or matched.
5782 pkt = s_rule->hdr_data;
5784 memcpy(pkt, profile->pkt, profile->pkt_len);
5786 for (i = 0; i < lkups_cnt; i++) {
5787 const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5788 enum ice_protocol_type type;
5789 u16 offset = 0, len = 0, j;
5792 /* find the start of this layer; it should be found since this
5793 * was already checked when search for the dummy packet
5795 type = lkups[i].type;
5796 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5797 if (type == offsets[j].type) {
5798 offset = offsets[j].offset;
5803 /* this should never happen in a correct calling sequence */
/* Map this protocol layer to the number of header bytes to patch */
5807 switch (lkups[i].type) {
5810 len = sizeof(struct ice_ether_hdr);
5814 len = sizeof(struct ice_ethtype_hdr);
5819 len = sizeof(struct ice_vlan_hdr);
5823 len = sizeof(struct ice_ipv4_hdr);
5827 len = sizeof(struct ice_ipv6_hdr);
5832 len = sizeof(struct ice_l4_hdr);
5835 len = sizeof(struct ice_sctp_hdr);
5838 len = sizeof(struct ice_nvgre_hdr);
5842 len = sizeof(struct ice_udp_tnl_hdr);
5844 case ICE_GTP_NO_PAY:
5846 len = sizeof(struct ice_udp_gtp_hdr);
5849 len = sizeof(struct ice_pppoe_hdr);
5855 /* the length should be a word multiple */
5856 if (len % ICE_BYTES_PER_WORD)
5859 /* We have the offset to the header start, the length, the
5860 * caller's header values and mask. Use this information to
5861 * copy the data into the dummy packet appropriately based on
5862 * the mask. Note that we need to only write the bits as
5863 * indicated by the mask to make sure we don't improperly write
5864 * over any significant packet data.
5866 for (j = 0; j < len / sizeof(u16); j++) {
5867 u16 *ptr = (u16 *)(pkt + offset);
5868 u16 mask = lkups[i].m_raw[j];
/* Merge: keep template bits outside the mask, take caller bits inside */
5873 ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5877 s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5883 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5884 * @hw: pointer to the hardware structure
5885 * @tun_type: tunnel type
5886 * @pkt: dummy packet to fill in
5887 * @offsets: offset info for the dummy packet
5890 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5891 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5896 case ICE_SW_TUN_VXLAN:
5897 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5900 case ICE_SW_TUN_GENEVE:
5901 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5905 /* Nothing needs to be done for this tunnel type */
5909 /* Find the outer UDP protocol header and insert the port number */
5910 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5911 if (offsets[i].type == ICE_UDP_OF) {
5912 struct ice_l4_hdr *hdr;
5915 offset = offsets[i].offset;
5916 hdr = (struct ice_l4_hdr *)&pkt[offset];
5917 hdr->dst_port = cpu_to_be16(open_port);
5927 * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5928 * @vlan_type: VLAN tag type
5929 * @pkt: dummy packet to fill in
5930 * @offsets: offset info for the dummy packet
5933 ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
5934 const struct ice_dummy_pkt_offsets *offsets)
5938 /* Find VLAN header and insert VLAN TPID */
5939 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5940 if (offsets[i].type == ICE_VLAN_OFOS ||
5941 offsets[i].type == ICE_VLAN_EX) {
5942 struct ice_vlan_hdr *hdr;
5945 offset = offsets[i].offset;
5946 hdr = (struct ice_vlan_hdr *)&pkt[offset];
5947 hdr->type = cpu_to_be16(vlan_type);
/* NOTE(review): numbered listing with gaps (5974->5977, 5984->5986,
 * 5986->5989, and the tail of the comparison at 5991 is cut mid-expression);
 * the loop braces, `continue`/`return` lines and final return are missing.
 * Code left byte-identical; comments only.
 */
5957 * ice_find_adv_rule_entry - Search a rule entry
5958 * @hw: pointer to the hardware structure
5959 * @lkups: lookup elements or match criteria for the advanced recipe, one
5960 * structure per protocol header
5961 * @lkups_cnt: number of protocols
5962 * @recp_id: recipe ID for which we are finding the rule
5963 * @rinfo: other information regarding the rule e.g. priority and action info
5965 * Helper function to search for a given advance rule entry
5966 * Returns pointer to entry storing the rule if found
5968 static struct ice_adv_fltr_mgmt_list_entry *
5969 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5970 u16 lkups_cnt, u16 recp_id,
5971 struct ice_adv_rule_info *rinfo)
5973 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5974 struct ice_switch_info *sw = hw->switch_info;
/* Walk every filter rule already recorded for this recipe */
5977 list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5979 bool lkups_matched = true;
/* A candidate must have the same number of lookups... */
5981 if (lkups_cnt != list_itr->lkups_cnt)
/* ...and each lookup element must compare equal byte-for-byte */
5983 for (i = 0; i < list_itr->lkups_cnt; i++)
5984 if (memcmp(&list_itr->lkups[i], &lkups[i],
5986 lkups_matched = false;
/* Besides lookups, flag/tunnel/VLAN attributes must also match
 * (comparison truncated in this listing).
 */
5989 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5990 rinfo->tun_type == list_itr->rule_info.tun_type &&
5991 rinfo->vlan_type == list_itr->rule_info.vlan_type &&
/* NOTE(review): numbered listing with gaps (6025->6028, 6054->6060,
 * 6070->6074, 6096->6098, 6100->6103, ...); error-return lines, closing
 * braces and some arguments are missing. Code left byte-identical.
 */
5999 * ice_adv_add_update_vsi_list
6000 * @hw: pointer to the hardware structure
6001 * @m_entry: pointer to current adv filter management list entry
6002 * @cur_fltr: filter information from the book keeping entry
6003 * @new_fltr: filter information with the new VSI to be added
6005 * Call AQ command to add or update previously created VSI list with new VSI.
6007 * Helper function to do book keeping associated with adding filter information
6008 * The algorithm to do the booking keeping is described below :
6009 * When a VSI needs to subscribe to a given advanced filter
6010 * if only one VSI has been added till now
6011 * Allocate a new VSI list and add two VSIs
6012 * to this list using switch rule command
6013 * Update the previously created switch rule with the
6014 * newly created VSI list ID
6015 * if a VSI list was previously created
6016 * Add the new VSI to the previously created VSI list set
6017 * using the update switch rule command
6020 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6021 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6022 struct ice_adv_rule_info *cur_fltr,
6023 struct ice_adv_rule_info *new_fltr)
6025 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be merged into a VSI list */
6028 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6029 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6030 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
/* Mixing a queue action with an existing VSI-forward is also rejected */
6033 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6034 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6035 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6036 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6039 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6040 /* Only one entry existed in the mapping and it was not already
6041 * a part of a VSI list. So, create a VSI list with the old and
6044 struct ice_fltr_info tmp_fltr;
6045 u16 vsi_handle_arr[2];
6047 /* A rule already exists with the new VSI being added */
6048 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6049 new_fltr->sw_act.fwd_id.hw_vsi_id)
6052 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6053 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6054 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6060 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6061 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
6062 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6063 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6064 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6065 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6067 /* Update the previous switch rule of "forward to VSI" to
6070 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the filter now forwards to the new VSI list */
6074 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6075 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6076 m_entry->vsi_list_info =
6077 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6080 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6082 if (!m_entry->vsi_list_info)
6085 /* A rule already exists with the new VSI being added */
6086 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
6089 /* Update the previously created VSI list set with
6090 * the new VSI ID passed in
6092 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6094 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6096 ice_aqc_opc_update_sw_rules,
6098 /* update VSI list mapping info with new VSI ID */
6100 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this advanced filter */
6103 m_entry->vsi_count++;
/* NOTE(review): numbered listing with many dropped lines (6144->6150,
 * 6156->6163, 6169->6171, declarations of act/q_rgn/status, `break`s in the
 * switch, and the final return). Code left byte-identical; comments only.
 */
6108 * ice_add_adv_rule - helper function to create an advanced switch rule
6109 * @hw: pointer to the hardware structure
6110 * @lkups: information on the words that needs to be looked up. All words
6111 * together makes one recipe
6112 * @lkups_cnt: num of entries in the lkups array
6113 * @rinfo: other information related to the rule that needs to be programmed
6114 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6115 * ignored is case of error.
6117 * This function can program only 1 rule at a time. The lkups is used to
6118 * describe the all the words that forms the "lookup" portion of the recipe.
6119 * These words can span multiple protocols. Callers to this function need to
6120 * pass in a list of protocol headers with lookup information along and mask
6121 * that determines which words are valid from the given protocol header.
6122 * rinfo describes other information related to this rule such as forwarding
6123 * IDs, priority of this rule, etc.
6126 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6127 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6128 struct ice_rule_query_data *added_entry)
6130 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6131 struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
6132 const struct ice_dummy_pkt_profile *profile;
6133 u16 rid = 0, i, rule_buf_sz, vsi_handle;
6134 struct list_head *rule_head;
6135 struct ice_switch_info *sw;
6141 /* Initialize profile to result index bitmap */
6142 if (!hw->switch_info->prof_res_bm_init) {
6143 hw->switch_info->prof_res_bm_init = 1;
6144 ice_init_prof_result_bm(hw);
6150 /* get # of words we need to match */
6152 for (i = 0; i < lkups_cnt; i++) {
6155 for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
6156 if (lkups[i].m_raw[j])
/* Too many match words for one recipe chain -> reject */
6163 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6166 /* locate a dummy packet */
6167 profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
6168 if (IS_ERR(profile))
6169 return PTR_ERR(profile);
/* Only these four filter actions are supported for advanced rules */
6171 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6172 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6173 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6174 rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
6176 goto free_pkt_profile;
6179 vsi_handle = rinfo->sw_act.vsi_handle;
6180 if (!ice_is_vsi_valid(hw, vsi_handle)) {
6182 goto free_pkt_profile;
6185 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6186 rinfo->sw_act.fwd_id.hw_vsi_id =
6187 ice_get_hw_vsi_num(hw, vsi_handle);
6188 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6189 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6191 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6193 goto free_pkt_profile;
6194 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6196 /* we have to add VSI to VSI_LIST and increment vsi_count.
6197 * Also Update VSI list so that we can change forwarding rule
6198 * if the rule already exists, we will check if it exists with
6199 * same vsi_id, if not then add it to the VSI list if it already
6200 * exists if not then create a VSI list and add the existing VSI
6201 * ID and the new VSI ID to the list
6202 * We will add that VSI to the list
6204 status = ice_adv_add_update_vsi_list(hw, m_entry,
6205 &m_entry->rule_info,
6208 added_entry->rid = rid;
6209 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6210 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6212 goto free_pkt_profile;
6214 rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
6215 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6218 goto free_pkt_profile;
/* Default action flags enable both LAN and loopback unless caller
 * supplied explicit act flags via flags_info.
 */
6220 if (!rinfo->flags_info.act_valid) {
6221 act |= ICE_SINGLE_ACT_LAN_ENABLE;
6222 act |= ICE_SINGLE_ACT_LB_ENABLE;
6224 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
6225 ICE_SINGLE_ACT_LB_ENABLE);
/* Encode the filter action into the single-action word */
6228 switch (rinfo->sw_act.fltr_act) {
6229 case ICE_FWD_TO_VSI:
6230 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6231 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6232 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6235 act |= ICE_SINGLE_ACT_TO_Q;
6236 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6237 ICE_SINGLE_ACT_Q_INDEX_M;
6239 case ICE_FWD_TO_QGRP:
/* Queue-region size is encoded as log2 of the group size */
6240 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6241 (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
6242 act |= ICE_SINGLE_ACT_TO_Q;
6243 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6244 ICE_SINGLE_ACT_Q_INDEX_M;
6245 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6246 ICE_SINGLE_ACT_Q_REGION_M;
6248 case ICE_DROP_PACKET:
6249 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6250 ICE_SINGLE_ACT_VALID_BIT;
6254 goto err_ice_add_adv_rule;
6257 /* set the rule LOOKUP type based on caller specified 'Rx'
6258 * instead of hardcoding it to be either LOOKUP_TX/RX
6260 * for 'Rx' set the source to be the port number
6261 * for 'Tx' set the source to be the source HW VSI number (determined
6265 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
6266 s_rule->src = cpu_to_le16(hw->port_info->lport);
6268 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
6269 s_rule->src = cpu_to_le16(rinfo->sw_act.src);
6272 s_rule->recipe_id = cpu_to_le16(rid);
6273 s_rule->act = cpu_to_le32(act);
6275 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
6277 goto err_ice_add_adv_rule;
/* Tunnel rules need the open UDP tunnel port patched into the
 * dummy packet (skipped for non-tunnel rule types).
 */
6279 if (rinfo->tun_type != ICE_NON_TUN &&
6280 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6281 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6285 goto err_ice_add_adv_rule;
/* With double-VLAN mode enabled, patch the requested TPID too */
6288 if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
6289 status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
6293 goto err_ice_add_adv_rule;
6296 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6297 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6300 goto err_ice_add_adv_rule;
6301 adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
6302 sizeof(struct ice_adv_fltr_mgmt_list_entry),
6306 goto err_ice_add_adv_rule;
6309 adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
6310 lkups_cnt * sizeof(*lkups), GFP_KERNEL);
6311 if (!adv_fltr->lkups) {
6313 goto err_ice_add_adv_rule;
6316 adv_fltr->lkups_cnt = lkups_cnt;
6317 adv_fltr->rule_info = *rinfo;
6318 adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
6319 sw = hw->switch_info;
6320 sw->recp_list[rid].adv_rule = true;
6321 rule_head = &sw->recp_list[rid].filt_rules;
6323 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6324 adv_fltr->vsi_count = 1;
6326 /* Add rule entry to book keeping list */
6327 list_add(&adv_fltr->list_entry, rule_head);
6329 added_entry->rid = rid;
6330 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6331 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6333 err_ice_add_adv_rule:
/* Unified error path: free the partially built management entry */
6334 if (status && adv_fltr) {
6335 devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
6336 devm_kfree(ice_hw_to_dev(hw), adv_fltr);
/* Dummy-packet profiles built at runtime are kmalloc'd and must
 * be freed; static profiles (no ICE_PKT_KMALLOC) are not.
 */
6342 if (profile->match & ICE_PKT_KMALLOC) {
6343 kfree(profile->offsets);
6344 kfree(profile->pkt);
/* NOTE(review): numbered listing with gaps (6369->6371, 6382->6387,
 * 6400->...); `continue`/`break` lines, the status check after
 * ice_add_rule_internal, and the final return are missing. Code left
 * byte-identical; comments only.
 */
6352 * ice_replay_vsi_fltr - Replay filters for requested VSI
6353 * @hw: pointer to the hardware structure
6354 * @vsi_handle: driver VSI handle
6355 * @recp_id: Recipe ID for which rules need to be replayed
6356 * @list_head: list for which filters need to be replayed
6358 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6359 * It is required to pass valid VSI handle.
6362 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6363 struct list_head *list_head)
6365 struct ice_fltr_mgmt_list_entry *itr;
6369 if (list_empty(list_head))
6371 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6373 list_for_each_entry(itr, list_head, list_entry) {
6374 struct ice_fltr_list_entry f_entry;
6376 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI (non-list) rule owned by this VSI: replay directly */
6377 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6378 itr->fltr_info.vsi_handle == vsi_handle) {
6379 /* update the src in case it is VSI num */
6380 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6381 f_entry.fltr_info.src = hw_vsi_id;
6382 status = ice_add_rule_internal(hw, recp_id, &f_entry);
/* VSI-list rule: only replay if this VSI was a subscriber */
6387 if (!itr->vsi_list_info ||
6388 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6390 /* Clearing it so that the logic can add it back */
6391 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6392 f_entry.fltr_info.vsi_handle = vsi_handle;
6393 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6394 /* update the src in case it is VSI num */
6395 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6396 f_entry.fltr_info.src = hw_vsi_id;
6397 if (recp_id == ICE_SW_LKUP_VLAN)
6398 status = ice_add_vlan_internal(hw, &f_entry);
6400 status = ice_add_rule_internal(hw, recp_id, &f_entry);
/* NOTE(review): numbered listing with gaps (6420->6424, 6435->6440,
 * 6447->6449, 6455->6460, ...); status checks after the AQ calls, some
 * arguments, and the final return are missing. Code left byte-identical.
 */
6409 * ice_adv_rem_update_vsi_list
6410 * @hw: pointer to the hardware structure
6411 * @vsi_handle: VSI handle of the VSI to remove
6412 * @fm_list: filter management entry for which the VSI list management needs to
6416 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6417 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6419 struct ice_vsi_list_map_info *vsi_list_info;
6420 enum ice_sw_lkup_type lkup_type;
/* Only meaningful for rules currently forwarding to a VSI list */
6424 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6425 fm_list->vsi_count == 0)
6428 /* A rule with the VSI being removed does not exist */
6429 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
6432 lkup_type = ICE_SW_LKUP_LAST;
6433 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* remove=true drops this VSI from the HW VSI list */
6434 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6435 ice_aqc_opc_update_sw_rules,
6440 fm_list->vsi_count--;
6441 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6442 vsi_list_info = fm_list->vsi_list_info;
/* With one subscriber left, collapse the list back to a plain
 * forward-to-VSI rule and free the now-unneeded VSI list.
 */
6443 if (fm_list->vsi_count == 1) {
6444 struct ice_fltr_info tmp_fltr;
6447 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
6449 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6452 /* Make sure VSI list is empty before removing it below */
6453 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6455 ice_aqc_opc_update_sw_rules,
6460 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
6461 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
6462 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6463 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6464 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6465 tmp_fltr.fwd_id.hw_vsi_id =
6466 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6467 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6468 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6469 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
6471 /* Update the previous switch rule of "MAC forward to VSI" to
6472 * "MAC fwd to VSI list"
6474 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6476 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6477 tmp_fltr.fwd_id.hw_vsi_id, status);
6480 fm_list->vsi_list_info->ref_cnt--;
6482 /* Remove the VSI list since it is no longer used */
6483 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6485 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
6486 vsi_list_id, status);
6490 list_del(&vsi_list_info->list_entry);
6491 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
/* Dangling pointer cleared after the map info was freed above */
6492 fm_list->vsi_list_info = NULL;
/* NOTE(review): numbered listing with gaps (6526->6529, 6532->6537,
 * 6540->6544, 6551->6554, ...); status checks, `remove_rule = true`
 * assignments, kfree of s_rule and returns are missing. Code left
 * byte-identical; comments only.
 */
6499 * ice_rem_adv_rule - removes existing advanced switch rule
6500 * @hw: pointer to the hardware structure
6501 * @lkups: information on the words that needs to be looked up. All words
6502 * together makes one recipe
6503 * @lkups_cnt: num of entries in the lkups array
6504 * @rinfo: Its the pointer to the rule information for the rule
6506 * This function can be used to remove 1 rule at a time. The lkups is
6507 * used to describe all the words that forms the "lookup" portion of the
6508 * rule. These words can span multiple protocols. Callers to this function
6509 * need to pass in a list of protocol headers with lookup information along
6510 * and mask that determines which words are valid from the given protocol
6511 * header. rinfo describes other information related to this rule such as
6512 * forwarding IDs, priority of this rule, etc.
6515 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6516 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6518 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6519 struct ice_prot_lkup_ext lkup_exts;
6520 bool remove_rule = false;
6521 struct mutex *rule_lock; /* Lock to protect filter rule list */
6522 u16 i, rid, vsi_handle;
6525 memset(&lkup_exts, 0, sizeof(lkup_exts));
/* Re-derive the recipe lookup words so the recipe can be located */
6526 for (i = 0; i < lkups_cnt; i++) {
6529 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6532 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6537 /* Create any special protocol/offset pairs, such as looking at tunnel
6538 * bits by extracting metadata
6540 status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
6544 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
6545 /* If did not find a recipe that match the existing criteria */
6546 if (rid == ICE_MAX_NUM_RECIPES)
6549 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6550 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6551 /* the rule is already removed */
6554 mutex_lock(rule_lock);
/* Decide between full rule removal and VSI-list shrink:
 * - not a VSI-list rule -> remove the whole rule
 * - list with >1 subscriber -> only detach this VSI
 * - list with 1 subscriber -> detach, then possibly remove below
 */
6555 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6557 } else if (list_elem->vsi_count > 1) {
6558 remove_rule = false;
6559 vsi_handle = rinfo->sw_act.vsi_handle;
6560 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6562 vsi_handle = rinfo->sw_act.vsi_handle;
6563 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6565 mutex_unlock(rule_lock);
6568 if (list_elem->vsi_count == 0)
6571 mutex_unlock(rule_lock);
6573 struct ice_sw_rule_lkup_rx_tx *s_rule;
/* Removal AQ command carries no dummy-packet header */
6576 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6577 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6581 s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6582 s_rule->hdr_len = 0;
6583 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6585 ice_aqc_opc_remove_sw_rules, NULL);
/* -ENOENT means HW already lost the rule; still drop book-keeping */
6586 if (!status || status == -ENOENT) {
6587 struct ice_switch_info *sw = hw->switch_info;
6589 mutex_lock(rule_lock);
6590 list_del(&list_elem->list_entry);
6591 devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6592 devm_kfree(ice_hw_to_dev(hw), list_elem);
6593 mutex_unlock(rule_lock);
6594 if (list_empty(&sw->recp_list[rid].filt_rules))
6595 sw->recp_list[rid].adv_rule = false;
/* NOTE(review): numbered listing with gaps (6618->6620, 6621->6623,
 * 6633->...); the error return for a missing recipe and the final return
 * are missing. Code left byte-identical; comments only.
 */
6603 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6604 * @hw: pointer to the hardware structure
6605 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6607 * This function is used to remove 1 rule at a time. The removal is based on
6608 * the remove_entry parameter. This function will remove rule for a given
6609 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6612 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6613 struct ice_rule_query_data *remove_entry)
6615 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6616 struct list_head *list_head;
6617 struct ice_adv_rule_info rinfo;
6618 struct ice_switch_info *sw;
6620 sw = hw->switch_info;
/* Recipe must exist before its filter rules can be searched */
6621 if (!sw->recp_list[remove_entry->rid].recp_created)
6623 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Linear scan by rule ID; delegate actual removal to ice_rem_adv_rule */
6624 list_for_each_entry(list_itr, list_head, list_entry) {
6625 if (list_itr->rule_info.fltr_rule_id ==
6626 remove_entry->rule_id) {
6627 rinfo = list_itr->rule_info;
6628 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6629 return ice_rem_adv_rule(hw, list_itr->lkups,
6630 list_itr->lkups_cnt, &rinfo);
6633 /* either list is empty or unable to find rule */
/* NOTE(review): numbered listing with gaps (6661->6664, 6670->6674,
 * 6682->...); `continue` lines, the status check on ice_rem_adv_rule, and
 * the final return are missing. Code left byte-identical; comments only.
 */
6638 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6640 * @hw: pointer to the hardware structure
6641 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6643 * This function is used to remove all the rules for a given VSI and as soon
6644 * as removing a rule fails, it will return immediately with the error code,
6645 * else it will return success.
6647 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6649 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6650 struct ice_vsi_list_map_info *map_info;
6651 struct ice_adv_rule_info rinfo;
6652 struct list_head *list_head;
6653 struct ice_switch_info *sw;
6657 sw = hw->switch_info;
/* Scan every created recipe that carries advanced rules */
6658 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6659 if (!sw->recp_list[rid].recp_created)
6661 if (!sw->recp_list[rid].adv_rule)
6664 list_head = &sw->recp_list[rid].filt_rules;
/* _safe iteration: ice_rem_adv_rule below deletes entries */
6665 list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6667 rinfo = list_itr->rule_info;
/* VSI-list rules match via the subscriber bitmap; plain
 * rules match via the stored vsi_handle.
 */
6669 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6670 map_info = list_itr->vsi_list_info;
6674 if (!test_bit(vsi_handle, map_info->vsi_map))
6676 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6680 rinfo.sw_act.vsi_handle = vsi_handle;
6681 status = ice_rem_adv_rule(hw, list_itr->lkups,
6682 list_itr->lkups_cnt, &rinfo);
/* NOTE(review): numbered listing with gaps (6703->6706, 6714->...); the
 * status variable declaration, the trailing argument of ice_add_adv_rule,
 * and the final return are missing. Code left byte-identical.
 */
6691 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6692 * @hw: pointer to the hardware structure
6693 * @vsi_handle: driver VSI handle
6694 * @list_head: list for which filters need to be replayed
6696 * Replay the advanced rule for the given VSI.
6699 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6700 struct list_head *list_head)
6702 struct ice_rule_query_data added_entry = { 0 };
6703 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6706 if (list_empty(list_head))
6708 list_for_each_entry(adv_fltr, list_head, list_entry) {
6709 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6710 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only re-program rules that belong to the requested VSI */
6712 if (vsi_handle != rinfo->sw_act.vsi_handle)
6714 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
/* NOTE(review): numbered listing with gaps (6731->6735, 6740->6742,
 * 6742->...); the status declaration, `else`, loop-break on error and
 * final return are missing. Code left byte-identical; comments only.
 */
6723 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6724 * @hw: pointer to the hardware structure
6725 * @vsi_handle: driver VSI handle
6727 * Replays filters for requested VSI via vsi_handle.
6729 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6731 struct ice_switch_info *sw = hw->switch_info;
6735 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6736 struct list_head *head;
6738 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes replay via ice_replay_vsi_fltr; recipes flagged
 * adv_rule use the advanced-rule replay path instead.
 */
6739 if (!sw->recp_list[i].adv_rule)
6740 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6742 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6750 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6751 * @hw: pointer to the HW struct
6753 * Deletes the filter replay rules.
6755 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6757 struct ice_switch_info *sw = hw->switch_info;
6763 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6764 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6765 struct list_head *l_head;
6767 l_head = &sw->recp_list[i].filt_replay_rules;
6768 if (!sw->recp_list[i].adv_rule)
6769 ice_rem_sw_rule_info(hw, l_head);
6771 ice_rem_adv_rule_info(hw, l_head);