1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 2020 Marvell International Ltd.
5 * This header file defines the work queue entry (wqe) data structure.
6 * Since this is a commonly used structure that depends on structures
7 * from several hardware blocks, those definitions have been placed
8 * in this file to create a single point of definition of the wqe
10 * Data structures are still named according to the block that they
14 #ifndef __CVMX_WQE_H__
15 #define __CVMX_WQE_H__
17 #include "cvmx-packet.h"
18 #include "cvmx-csr-enums.h"
19 #include "cvmx-pki-defs.h"
20 #include "cvmx-pip-defs.h"
21 #include "octeon-feature.h"
23 #define OCT_TAG_TYPE_STRING(x) \
24 (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? \
26 (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? \
28 (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : "NULL_NULL")))
30 /* Error levels in WQE WORD2 (ERRLEV).*/
31 #define PKI_ERRLEV_E__RE_M 0x0
32 #define PKI_ERRLEV_E__LA_M 0x1
33 #define PKI_ERRLEV_E__LB_M 0x2
34 #define PKI_ERRLEV_E__LC_M 0x3
35 #define PKI_ERRLEV_E__LD_M 0x4
36 #define PKI_ERRLEV_E__LE_M 0x5
37 #define PKI_ERRLEV_E__LF_M 0x6
38 #define PKI_ERRLEV_E__LG_M 0x7
40 enum cvmx_pki_errlevel {
41 CVMX_PKI_ERRLEV_E_RE = PKI_ERRLEV_E__RE_M,
42 CVMX_PKI_ERRLEV_E_LA = PKI_ERRLEV_E__LA_M,
43 CVMX_PKI_ERRLEV_E_LB = PKI_ERRLEV_E__LB_M,
44 CVMX_PKI_ERRLEV_E_LC = PKI_ERRLEV_E__LC_M,
45 CVMX_PKI_ERRLEV_E_LD = PKI_ERRLEV_E__LD_M,
46 CVMX_PKI_ERRLEV_E_LE = PKI_ERRLEV_E__LE_M,
47 CVMX_PKI_ERRLEV_E_LF = PKI_ERRLEV_E__LF_M,
48 CVMX_PKI_ERRLEV_E_LG = PKI_ERRLEV_E__LG_M
51 #define CVMX_PKI_ERRLEV_MAX BIT(3) /* The size of WORD2:ERRLEV field.*/
53 /* Error code in WQE WORD2 (OPCODE).*/
54 #define CVMX_PKI_OPCODE_RE_NONE 0x0
55 #define CVMX_PKI_OPCODE_RE_PARTIAL 0x1
56 #define CVMX_PKI_OPCODE_RE_JABBER 0x2
57 #define CVMX_PKI_OPCODE_RE_FCS 0x7
58 #define CVMX_PKI_OPCODE_RE_FCS_RCV 0x8
59 #define CVMX_PKI_OPCODE_RE_TERMINATE 0x9
60 #define CVMX_PKI_OPCODE_RE_RX_CTL 0xb
61 #define CVMX_PKI_OPCODE_RE_SKIP 0xc
62 #define CVMX_PKI_OPCODE_RE_DMAPKT 0xf
63 #define CVMX_PKI_OPCODE_RE_PKIPAR 0x13
64 #define CVMX_PKI_OPCODE_RE_PKIPCAM 0x14
65 #define CVMX_PKI_OPCODE_RE_MEMOUT 0x15
66 #define CVMX_PKI_OPCODE_RE_BUFS_OFLOW 0x16
67 #define CVMX_PKI_OPCODE_L2_FRAGMENT 0x20
68 #define CVMX_PKI_OPCODE_L2_OVERRUN 0x21
69 #define CVMX_PKI_OPCODE_L2_PFCS 0x22
70 #define CVMX_PKI_OPCODE_L2_PUNY 0x23
71 #define CVMX_PKI_OPCODE_L2_MAL 0x24
72 #define CVMX_PKI_OPCODE_L2_OVERSIZE 0x25
73 #define CVMX_PKI_OPCODE_L2_UNDERSIZE 0x26
74 #define CVMX_PKI_OPCODE_L2_LENMISM 0x27
75 #define CVMX_PKI_OPCODE_IP_NOT 0x41
76 #define CVMX_PKI_OPCODE_IP_CHK 0x42
77 #define CVMX_PKI_OPCODE_IP_MAL 0x43
78 #define CVMX_PKI_OPCODE_IP_MALD 0x44
79 #define CVMX_PKI_OPCODE_IP_HOP 0x45
80 #define CVMX_PKI_OPCODE_L4_MAL 0x61
81 #define CVMX_PKI_OPCODE_L4_CHK 0x62
82 #define CVMX_PKI_OPCODE_L4_LEN 0x63
83 #define CVMX_PKI_OPCODE_L4_PORT 0x64
84 #define CVMX_PKI_OPCODE_TCP_FLAG 0x65
86 #define CVMX_PKI_OPCODE_MAX BIT(8) /* The size of WORD2:OPCODE field.*/
88 /* Layer types in pki */
89 #define CVMX_PKI_LTYPE_E_NONE_M 0x0
90 #define CVMX_PKI_LTYPE_E_ENET_M 0x1
91 #define CVMX_PKI_LTYPE_E_VLAN_M 0x2
92 #define CVMX_PKI_LTYPE_E_SNAP_PAYLD_M 0x5
93 #define CVMX_PKI_LTYPE_E_ARP_M 0x6
94 #define CVMX_PKI_LTYPE_E_RARP_M 0x7
95 #define CVMX_PKI_LTYPE_E_IP4_M 0x8
96 #define CVMX_PKI_LTYPE_E_IP4_OPT_M 0x9
97 #define CVMX_PKI_LTYPE_E_IP6_M 0xA
98 #define CVMX_PKI_LTYPE_E_IP6_OPT_M 0xB
99 #define CVMX_PKI_LTYPE_E_IPSEC_ESP_M 0xC
100 #define CVMX_PKI_LTYPE_E_IPFRAG_M 0xD
101 #define CVMX_PKI_LTYPE_E_IPCOMP_M 0xE
102 #define CVMX_PKI_LTYPE_E_TCP_M 0x10
103 #define CVMX_PKI_LTYPE_E_UDP_M 0x11
104 #define CVMX_PKI_LTYPE_E_SCTP_M 0x12
105 #define CVMX_PKI_LTYPE_E_UDP_VXLAN_M 0x13
106 #define CVMX_PKI_LTYPE_E_GRE_M 0x14
107 #define CVMX_PKI_LTYPE_E_NVGRE_M 0x15
108 #define CVMX_PKI_LTYPE_E_GTP_M 0x16
109 #define CVMX_PKI_LTYPE_E_SW28_M 0x1C
110 #define CVMX_PKI_LTYPE_E_SW29_M 0x1D
111 #define CVMX_PKI_LTYPE_E_SW30_M 0x1E
112 #define CVMX_PKI_LTYPE_E_SW31_M 0x1F
114 enum cvmx_pki_layer_type {
115 CVMX_PKI_LTYPE_E_NONE = CVMX_PKI_LTYPE_E_NONE_M,
116 CVMX_PKI_LTYPE_E_ENET = CVMX_PKI_LTYPE_E_ENET_M,
117 CVMX_PKI_LTYPE_E_VLAN = CVMX_PKI_LTYPE_E_VLAN_M,
118 CVMX_PKI_LTYPE_E_SNAP_PAYLD = CVMX_PKI_LTYPE_E_SNAP_PAYLD_M,
119 CVMX_PKI_LTYPE_E_ARP = CVMX_PKI_LTYPE_E_ARP_M,
120 CVMX_PKI_LTYPE_E_RARP = CVMX_PKI_LTYPE_E_RARP_M,
121 CVMX_PKI_LTYPE_E_IP4 = CVMX_PKI_LTYPE_E_IP4_M,
122 CVMX_PKI_LTYPE_E_IP4_OPT = CVMX_PKI_LTYPE_E_IP4_OPT_M,
123 CVMX_PKI_LTYPE_E_IP6 = CVMX_PKI_LTYPE_E_IP6_M,
124 CVMX_PKI_LTYPE_E_IP6_OPT = CVMX_PKI_LTYPE_E_IP6_OPT_M,
125 CVMX_PKI_LTYPE_E_IPSEC_ESP = CVMX_PKI_LTYPE_E_IPSEC_ESP_M,
126 CVMX_PKI_LTYPE_E_IPFRAG = CVMX_PKI_LTYPE_E_IPFRAG_M,
127 CVMX_PKI_LTYPE_E_IPCOMP = CVMX_PKI_LTYPE_E_IPCOMP_M,
128 CVMX_PKI_LTYPE_E_TCP = CVMX_PKI_LTYPE_E_TCP_M,
129 CVMX_PKI_LTYPE_E_UDP = CVMX_PKI_LTYPE_E_UDP_M,
130 CVMX_PKI_LTYPE_E_SCTP = CVMX_PKI_LTYPE_E_SCTP_M,
131 CVMX_PKI_LTYPE_E_UDP_VXLAN = CVMX_PKI_LTYPE_E_UDP_VXLAN_M,
132 CVMX_PKI_LTYPE_E_GRE = CVMX_PKI_LTYPE_E_GRE_M,
133 CVMX_PKI_LTYPE_E_NVGRE = CVMX_PKI_LTYPE_E_NVGRE_M,
134 CVMX_PKI_LTYPE_E_GTP = CVMX_PKI_LTYPE_E_GTP_M,
135 CVMX_PKI_LTYPE_E_SW28 = CVMX_PKI_LTYPE_E_SW28_M,
136 CVMX_PKI_LTYPE_E_SW29 = CVMX_PKI_LTYPE_E_SW29_M,
137 CVMX_PKI_LTYPE_E_SW30 = CVMX_PKI_LTYPE_E_SW30_M,
138 CVMX_PKI_LTYPE_E_SW31 = CVMX_PKI_LTYPE_E_SW31_M,
139 CVMX_PKI_LTYPE_E_MAX = CVMX_PKI_LTYPE_E_SW31
154 } cvmx_pki_wqe_word4_t;
157 * HW decode / err_code in work queue entry
165 u64 vlan_stacked : 1;
188 u64 vlan_stacked : 1;
211 u64 vlan_stacked : 1;
216 u64 unassigned2a : 4;
241 u64 vlan_stacked : 1;
261 u64 vlan_stacked : 1;
281 u64 vlan_stacked : 1;
286 u64 unassigned2a : 8;
298 } cvmx_pip_wqe_word2_t;
313 u64 vlan_stacked : 1;
328 } cvmx_pki_wqe_word2_t;
332 cvmx_pki_wqe_word2_t pki;
333 cvmx_pip_wqe_word2_t pip;
344 u64 l4ptr : 8; /* 56..63 */
345 u64 unused0 : 8; /* 48..55 */
346 u64 l3ptr : 8; /* 40..47 */
347 u64 l2ptr : 8; /* 32..39 */
348 u64 unused1 : 18; /* 14..31 */
349 u64 bpid : 6; /* 8..13 */
350 u64 unused2 : 2; /* 6..7 */
351 u64 pknd : 6; /* 0..5 */
353 } cvmx_pip_wqe_word0_t;
368 } cvmx_pki_wqe_word0_t;
370 /* Use reserved bit, set by HW to 0, to indicate buf_ptr legacy translation*/
371 #define pki_wqe_translated word0.rsvd_1
375 cvmx_pip_wqe_word0_t pip;
376 cvmx_pki_wqe_word0_t pki;
379 u64 next_ptr : 40; /* On cn68xx this is unused as well */
390 cvmx_pow_tag_type_t tag_type : 2;
393 } cvmx_pki_wqe_word1_t;
395 #define pki_errata20776 word1.rsvd_0
402 cvmx_pow_tag_type_t tag_type : 2;
405 cvmx_pki_wqe_word1_t cn78xx;
413 cvmx_pow_tag_type_t tag_type : 2;
422 cvmx_pow_tag_type_t tag_type : 2;
452 * Work queue entry format.
453 * Must be 8-byte aligned.
455 typedef struct cvmx_wqe_s {
456 /*-------------------------------------------------------------------*/
458 /*-------------------------------------------------------------------*/
459 /* HW WRITE: the following 64 bits are filled by HW when a packet
462 cvmx_wqe_word0_t word0;
464 /*-------------------------------------------------------------------*/
466 /*-------------------------------------------------------------------*/
467 /* HW WRITE: the following 64 bits are filled by HW when a packet
470 cvmx_wqe_word1_t word1;
472 /*-------------------------------------------------------------------*/
474 /*-------------------------------------------------------------------*/
475 /* HW WRITE: the following 64-bits are filled in by hardware when a
476 * packet arrives. This indicates a variety of status and error
479 cvmx_pip_wqe_word2_t word2;
481 /* Pointer to the first segment of the packet. */
482 cvmx_buf_ptr_t packet_ptr;
484 /* HW WRITE: OCTEON will fill in a programmable amount from the packet,
485 * up to (at most, but perhaps less) the amount needed to fill the work
486 * queue entry to 128 bytes. If the packet is recognized to be IP, the
487 * hardware starts (except that the IPv4 header is padded for
488 * appropriate alignment) writing here where the IP header starts.
489 * If the packet is not recognized to be IP, the hardware starts
490 * writing the beginning of the packet here.
494 /* If desired, SW can make the work Q entry any length. For the purposes
495 * of discussion here, Assume 128B always, as this is all that the hardware
498 } CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
501 * Work queue entry format for NQM
502 * Must be 8-byte aligned
504 typedef struct cvmx_wqe_nqm_s {
505 /*-------------------------------------------------------------------*/
507 /*-------------------------------------------------------------------*/
508 /* HW WRITE: the following 64 bits are filled by HW when a packet
511 cvmx_wqe_word0_t word0;
513 /*-------------------------------------------------------------------*/
515 /*-------------------------------------------------------------------*/
516 /* HW WRITE: the following 64 bits are filled by HW when a packet
519 cvmx_wqe_word1_t word1;
521 /*-------------------------------------------------------------------*/
523 /*-------------------------------------------------------------------*/
527 /*-------------------------------------------------------------------*/
529 /*-------------------------------------------------------------------*/
530 /* NVMe specific information.*/
531 cvmx_wqe_word3_t word3;
533 /*-------------------------------------------------------------------*/
535 /*-------------------------------------------------------------------*/
536 /* NVMe specific information.*/
537 cvmx_wqe_word4_t word4;
539 /* HW WRITE: OCTEON will fill in a programmable amount from the packet,
540 * up to (at most, but perhaps less) the amount needed to fill the work
541 * queue entry to 128 bytes. If the packet is recognized to be IP, the
542 * hardware starts (except that the IPv4 header is padded for
543 * appropriate alignment) writing here where the IP header starts.
544 * If the packet is not recognized to be IP, the hardware starts
545 * writing the beginning of the packet here.
549 /* If desired, SW can make the work Q entry any length.
550 * For the purposes of discussion here, assume 128B always, as this is
551 * all that the hardware deals with.
553 } CVMX_CACHE_LINE_ALIGNED cvmx_wqe_nqm_t;
556 * Work queue entry format for 78XX.
557 * In 78XX packet data always resides in WQE buffer unless option
558 * DIS_WQ_DAT=1 in PKI_STYLE_BUF, which causes packet data to use separate buffer.
560 * Must be 8-byte aligned.
563 /*-------------------------------------------------------------------*/
565 /*-------------------------------------------------------------------*/
566 /* HW WRITE: the following 64 bits are filled by HW when a packet
569 cvmx_pki_wqe_word0_t word0;
571 /*-------------------------------------------------------------------*/
573 /*-------------------------------------------------------------------*/
574 /* HW WRITE: the following 64 bits are filled by HW when a packet
577 cvmx_pki_wqe_word1_t word1;
579 /*-------------------------------------------------------------------*/
581 /*-------------------------------------------------------------------*/
582 /* HW WRITE: the following 64-bits are filled in by hardware when a
583 * packet arrives. This indicates a variety of status and error
586 cvmx_pki_wqe_word2_t word2;
588 /*-------------------------------------------------------------------*/
590 /*-------------------------------------------------------------------*/
591 /* Pointer to the first segment of the packet.*/
592 cvmx_buf_ptr_pki_t packet_ptr;
594 /*-------------------------------------------------------------------*/
596 /*-------------------------------------------------------------------*/
597 /* HW WRITE: the following 64-bits are filled in by hardware when a
598 * packet arrives contains a byte pointer to the start of Layer
599 * A/B/C/D/E/F/G relative of start of packet.
601 cvmx_pki_wqe_word4_t word4;
603 /*-------------------------------------------------------------------*/
604 /* WORDs 5/6/7 may be extended there, if WQE_HSZ is set. */
605 /*-------------------------------------------------------------------*/
608 } CVMX_CACHE_LINE_ALIGNED cvmx_wqe_78xx_t;
610 /* Node LS-bit position in the WQE[grp] or PKI_QPG_TBL[grp_ok].*/
611 #define CVMX_WQE_GRP_NODE_SHIFT 8
614 * This is an accessor function into the WQE that retrieves the
615 * ingress port number, which can also be used as a destination
616 * port number for the same port.
618 * @param work - Work Queue Entry pointer
619 * @returns returns the normalized port number, also known as "ipd" port
/* NOTE(review): this chunk is a partial dump of the original header.
 * The leading number on each line is the original-file line number; gaps
 * in that numbering show that some lines (braces, local declarations
 * such as "int port;", else branches, returns) are missing from this
 * copy. Only comments were added below; no code byte was changed.
 */
/* Read the normalized ("ipd") port. On CN78XX the value is built from
 * the channel field plus node bits taken from the AURA field; on CN68XX
 * it comes from word2, otherwise from word1 (cn38xx layout). */
621 static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)
625 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
626 /* In 78xx wqe entry has channel number not port*/
627 port = work->word0.pki.channel;
628 /* For BGX interfaces (0x800 - 0xdff) the 4 LSBs indicate
629 * the PFC channel, must be cleared to normalize to "ipd"
633 /* Node number is in AURA field, make it part of port # */
634 port |= (work->word0.pki.aura >> 10) << 12;
635 } else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
636 port = work->word2.s_cn68xx.port;
638 port = work->word1.cn38xx.ipprt;
/* Store a port number into the model-specific WQE field (inverse of
 * cvmx_wqe_get_port() dispatch). */
644 static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
646 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
647 work->word0.pki.channel = port;
648 else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
649 work->word2.s_cn68xx.port = port;
651 work->word1.cn38xx.ipprt = port;
/* Read the legacy group number. On CN78XX the legacy group occupies
 * GRP bits [3..7] (bits [0..2] carry the QOS value), hence the mask
 * and 3-bit shift. */
654 static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
658 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
659 /* legacy: GRP[0..2] :=QOS */
660 grp = (0xff & work->word1.cn78xx.grp) >> 3;
661 else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
662 grp = work->word1.cn68xx.grp;
664 grp = work->word1.cn38xx.grp;
/* Store the full (extended) group value without legacy re-encoding. */
669 static inline void cvmx_wqe_set_xgrp(cvmx_wqe_t *work, int grp)
671 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
672 work->word1.cn78xx.grp = grp;
673 else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
674 work->word1.cn68xx.grp = grp;
676 work->word1.cn38xx.grp = grp;
/* Read the full (extended) group value without legacy re-encoding. */
679 static inline int cvmx_wqe_get_xgrp(cvmx_wqe_t *work)
683 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
684 grp = work->word1.cn78xx.grp;
685 else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
686 grp = work->word1.cn68xx.grp;
688 grp = work->word1.cn38xx.grp;
/* Store a legacy group number. On CN78XX the value is placed in GRP
 * bits [3..7], the QOS bits [0..2] are preserved, and the node number
 * is merged in above bit 8 (CVMX_WQE_GRP_NODE_SHIFT). */
693 static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
695 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
696 unsigned int node = cvmx_get_node_num();
697 /* Legacy: GRP[0..2] :=QOS */
698 work->word1.cn78xx.grp &= 0x7;
699 work->word1.cn78xx.grp |= 0xff & (grp << 3);
700 work->word1.cn78xx.grp |= (node << 8);
701 } else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
702 work->word1.cn68xx.grp = grp;
704 work->word1.cn38xx.grp = grp;
/* Read the QOS value; on CN78XX it lives in the low 3 bits of GRP. */
708 static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
712 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
713 /* Legacy: GRP[0..2] :=QOS */
714 qos = work->word1.cn78xx.grp & 0x7;
715 } else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
716 qos = work->word1.cn68xx.qos;
718 qos = work->word1.cn38xx.qos;
/* Store the QOS value; on CN78XX only GRP bits [0..2] are replaced. */
724 static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
726 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
727 /* legacy: GRP[0..2] :=QOS */
728 work->word1.cn78xx.grp &= ~0x7;
729 work->word1.cn78xx.grp |= qos & 0x7;
730 } else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
731 work->word1.cn68xx.qos = qos;
733 work->word1.cn38xx.qos = qos;
/* Read the packet length from the model-specific word1 layout. */
737 static inline int cvmx_wqe_get_len(cvmx_wqe_t *work)
741 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
742 len = work->word1.cn78xx.len;
743 else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
744 len = work->word1.cn68xx.len;
746 len = work->word1.cn38xx.len;
/* Store the packet length into the model-specific word1 layout. */
751 static inline void cvmx_wqe_set_len(cvmx_wqe_t *work, int len)
753 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
754 work->word1.cn78xx.len = len;
755 else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
756 work->word1.cn68xx.len = len;
758 work->word1.cn38xx.len = len;
/* NOTE(review): partial dump -- the leading numbers are original-file
 * line numbers and gaps in them indicate lines missing from this copy
 * (braces, declarations, returns). Comments only; code untouched. */
762 * This function returns whether L2/L1 errors were detected in the packet.
764 * @param work pointer to work queue entry
766 * @return 0 if packet had no error, non-zero to indicate error code.
768 * Please refer to HRM for the specific model for full enumeration of error codes.
769 * With Octeon1/Octeon2 models, the returned code indicates L1/L2 errors.
770 * On CN73XX/CN78XX, the return code is the value of PKI_OPCODE_E,
771 * if it is non-zero, otherwise the returned code will be derived from
772 * PKI_ERRLEV_E such that an error indicated in LayerA will return 0x20,
773 * LayerB - 0x30, LayerC - 0x40 and so forth.
775 static inline int cvmx_wqe_get_rcv_err(cvmx_wqe_t *work)
777 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
778 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
780 if (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_RE || wqe->word2.err_code != 0)
781 return wqe->word2.err_code;
783 return (wqe->word2.err_level << 4) + 0x10;
784 } else if (work->word2.snoip.rcv_error) {
785 return work->word2.snoip.err_code;
/* Tag value: word1.tag has the same position on all models, so no
 * feature dispatch is needed here. */
791 static inline u32 cvmx_wqe_get_tag(cvmx_wqe_t *work)
793 return work->word1.tag;
796 static inline void cvmx_wqe_set_tag(cvmx_wqe_t *work, u32 tag)
798 work->word1.tag = tag;
801 static inline int cvmx_wqe_get_tt(cvmx_wqe_t *work)
803 return work->word1.tag_type;
/* Store the tag type; legacy layouts also clear their zero_2 pad. */
806 static inline void cvmx_wqe_set_tt(cvmx_wqe_t *work, int tt)
808 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
809 work->word1.cn78xx.tag_type = (cvmx_pow_tag_type_t)tt;
810 } else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
811 work->word1.cn68xx.tag_type = (cvmx_pow_tag_type_t)tt;
812 work->word1.cn68xx.zero_2 = 0;
814 work->word1.cn38xx.tag_type = (cvmx_pow_tag_type_t)tt;
815 work->word1.cn38xx.zero_2 = 0;
/* Read the model-specific unused/reserved byte in the WQE. */
819 static inline u8 cvmx_wqe_get_unused8(cvmx_wqe_t *work)
823 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
824 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
826 bits = wqe->word2.rsvd_0;
827 } else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
828 bits = work->word0.pip.cn68xx.unused1;
830 bits = work->word0.pip.cn38xx.unused;
/* Store into the model-specific unused/reserved byte in the WQE. */
836 static inline void cvmx_wqe_set_unused8(cvmx_wqe_t *work, u8 v)
838 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
839 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
841 wqe->word2.rsvd_0 = v;
842 } else if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
843 work->word0.pip.cn68xx.unused1 = v;
845 work->word0.pip.cn38xx.unused = v;
/* Software-defined flag byte; only backed by storage on CN78XX
 * (word0.pki.rsvd_2). Other models fall through (missing lines here
 * presumably return 0 -- gap in this copy). */
849 static inline u8 cvmx_wqe_get_user_flags(cvmx_wqe_t *work)
851 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
852 return work->word0.pki.rsvd_2;
857 static inline void cvmx_wqe_set_user_flags(cvmx_wqe_t *work, u8 v)
859 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
860 work->word0.pki.rsvd_2 = v;
/* Channel: native field on CN78XX; mapped to the port on older chips. */
863 static inline int cvmx_wqe_get_channel(cvmx_wqe_t *work)
865 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
866 return (work->word0.pki.channel);
868 return cvmx_wqe_get_port(work);
871 static inline void cvmx_wqe_set_channel(cvmx_wqe_t *work, int channel)
873 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
874 work->word0.pki.channel = channel;
876 debug("%s: ERROR: not supported for model\n", __func__);
/* AURA: native field on CN78XX; approximated by the packet-pointer
 * pool field on legacy models. */
879 static inline int cvmx_wqe_get_aura(cvmx_wqe_t *work)
881 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
882 return (work->word0.pki.aura);
884 return (work->packet_ptr.s.pool);
887 static inline void cvmx_wqe_set_aura(cvmx_wqe_t *work, int aura)
889 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
890 work->word0.pki.aura = aura;
892 work->packet_ptr.s.pool = aura;
/* PKI style: CN78XX-only concept (word0.pki.style). */
895 static inline int cvmx_wqe_get_style(cvmx_wqe_t *work)
897 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
898 return (work->word0.pki.style);
902 static inline void cvmx_wqe_set_style(cvmx_wqe_t *work, int style)
904 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
905 work->word0.pki.style = style;
/* NOTE(review): partial dump -- leading numbers are original-file line
 * numbers; gaps in them indicate lines missing from this copy.
 * Comments only; code untouched. */
/* L3-is-IP test. On CN78XX, masking the layer type with 0x1c groups
 * IP4/IP4_OPT/IP6/IP6_OPT (0x8..0xB) into one compare; both the LC
 * (outer) and LE (inner) layer types are checked. */
908 static inline int cvmx_wqe_is_l3_ip(cvmx_wqe_t *work)
910 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
911 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
912 /* Match all 4 values for v4/v6 with/without options */
913 if ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
915 if ((wqe->word2.le_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
919 return !work->word2.s_cn38xx.not_IP;
/* L3-is-IPv4 test; mask 0x1e groups IP4 and IP4_OPT only. */
923 static inline int cvmx_wqe_is_l3_ipv4(cvmx_wqe_t *work)
925 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
926 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
927 /* Match 2 values - with/without options */
928 if ((wqe->word2.lc_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP4)
930 if ((wqe->word2.le_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP4)
934 return (!work->word2.s_cn38xx.not_IP &&
935 !work->word2.s_cn38xx.is_v6);
/* L3-is-IPv6 test; mask 0x1e groups IP6 and IP6_OPT only. */
939 static inline int cvmx_wqe_is_l3_ipv6(cvmx_wqe_t *work)
941 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
942 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
943 /* Match 2 values - with/without options */
944 if ((wqe->word2.lc_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP6)
946 if ((wqe->word2.le_hdr_type & 0x1e) == CVMX_PKI_LTYPE_E_IP6)
950 return (!work->word2.s_cn38xx.not_IP &&
951 work->word2.s_cn38xx.is_v6);
/* L4 TCP/UDP test: exact LF layer-type compare on CN78XX; legacy
 * models use the tcp_or_udp flag (valid only when the packet is IP). */
955 static inline bool cvmx_wqe_is_l4_udp_or_tcp(cvmx_wqe_t *work)
957 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
958 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
960 if (wqe->word2.lf_hdr_type == CVMX_PKI_LTYPE_E_TCP)
962 if (wqe->word2.lf_hdr_type == CVMX_PKI_LTYPE_E_UDP)
967 if (work->word2.s_cn38xx.not_IP)
970 return (work->word2.s_cn38xx.tcp_or_udp != 0);
/* L2 broadcast/multicast flags, per-model field names. */
973 static inline int cvmx_wqe_is_l2_bcast(cvmx_wqe_t *work)
975 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
976 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
978 return wqe->word2.is_l2_bcast;
980 return work->word2.s_cn38xx.is_bcast;
984 static inline int cvmx_wqe_is_l2_mcast(cvmx_wqe_t *work)
986 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
987 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
989 return wqe->word2.is_l2_mcast;
991 return work->word2.s_cn38xx.is_mcast;
995 static inline void cvmx_wqe_set_l2_bcast(cvmx_wqe_t *work, bool bcast)
997 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
998 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1000 wqe->word2.is_l2_bcast = bcast;
1002 work->word2.s_cn38xx.is_bcast = bcast;
1006 static inline void cvmx_wqe_set_l2_mcast(cvmx_wqe_t *work, bool mcast)
1008 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1009 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1011 wqe->word2.is_l2_mcast = mcast;
1013 work->word2.s_cn38xx.is_mcast = mcast;
/* L3 broadcast/multicast flags exist only on CN78XX; other models log
 * an error via debug(). */
1017 static inline int cvmx_wqe_is_l3_bcast(cvmx_wqe_t *work)
1019 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1020 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1022 return wqe->word2.is_l3_bcast;
1024 debug("%s: ERROR: not supported for model\n", __func__);
1028 static inline int cvmx_wqe_is_l3_mcast(cvmx_wqe_t *work)
1030 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1031 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1033 return wqe->word2.is_l3_mcast;
1035 debug("%s: ERROR: not supported for model\n", __func__);
1040 * This function returns whether an IP error was detected in the packet.
1041 * For 78XX it does not flag ipv4 options and ipv6 extensions.
1042 * For older chips if PIP_GBL_CTL was provisioned to flag ip4_options and
1043 * ipv6 extension, it will flag them.
1044 * @param work pointer to work queue entry
1045 * @return 1 -- If IP error was found in packet
1046 * 0 -- If no IP error was found in packet.
1048 static inline int cvmx_wqe_is_ip_exception(cvmx_wqe_t *work)
1050 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1051 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1053 if (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_LC)
1059 return work->word2.s.IP_exc;
/* L4 error test: on CN78XX an error at error level LF (Layer F). */
1062 static inline int cvmx_wqe_is_l4_error(cvmx_wqe_t *work)
1064 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1065 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1067 if (wqe->word2.err_level == CVMX_PKI_ERRLEV_E_LF)
1072 return work->word2.s.L4_error;
/* VLAN valid / stacked flags, per-model word2 layouts. */
1076 static inline void cvmx_wqe_set_vlan(cvmx_wqe_t *work, bool set)
1078 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1079 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1081 wqe->word2.vlan_valid = set;
1083 work->word2.s.vlan_valid = set;
1087 static inline int cvmx_wqe_is_vlan(cvmx_wqe_t *work)
1089 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1090 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1092 return wqe->word2.vlan_valid;
1094 return work->word2.s.vlan_valid;
1098 static inline int cvmx_wqe_is_vlan_stacked(cvmx_wqe_t *work)
1100 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1101 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1103 return wqe->word2.vlan_stacked;
1105 return work->word2.s.vlan_stacked;
/* NOTE(review): partial dump -- leading numbers are original-file line
 * numbers; gaps in them indicate lines missing from this copy.
 * Comments only; code untouched. */
1110 * Extract packet data buffer pointer from work queue entry.
1112 * Returns the legacy (Octeon1/Octeon2) buffer pointer structure
1113 * for the linked buffer list.
1114 * On CN78XX, the native buffer pointer structure is converted into
1115 * the legacy format.
1116 * The legacy buf_ptr is then stored in the WQE, and word0 reserved
1117 * field is set to indicate that the buffer pointers were translated.
1118 * If the packet data is only found inside the work queue entry,
1119 * a standard buffer pointer structure is created for it.
1121 cvmx_buf_ptr_t cvmx_wqe_get_packet_ptr(cvmx_wqe_t *work);
/* Number of buffer segments holding the packet. On legacy models a
 * packet-in-WQE (bufs==0, not software-created) is first normalized by
 * calling cvmx_wqe_get_packet_ptr() for its side effect of creating a
 * buffer pointer, then bufs is re-read. */
1123 static inline int cvmx_wqe_get_bufs(cvmx_wqe_t *work)
1127 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1128 bufs = work->word0.pki.bufs;
1130 /* Adjust for packet-in-WQE cases */
1131 if (cvmx_unlikely(work->word2.s_cn38xx.bufs == 0 && !work->word2.s.software))
1132 (void)cvmx_wqe_get_packet_ptr(work);
1133 bufs = work->word2.s_cn38xx.bufs;
1139 * Free Work Queue Entry memory
1141 * Will return the WQE buffer to its pool, unless the WQE contains
1142 * non-redundant packet data.
1143 * This function is intended to be called AFTER the packet data
1144 * has been passed along to PKO for transmission and release.
1145 * It can also follow a call to cvmx_helper_free_packet_data()
1146 * to release the WQE after associated data was released.
1148 void cvmx_wqe_free(cvmx_wqe_t *work);
1151 * Check if a work entry has been initiated by software
/* Reads the per-model "software" flag in word2. */
1154 static inline bool cvmx_wqe_is_soft(cvmx_wqe_t *work)
1156 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1157 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1159 return wqe->word2.software;
1161 return work->word2.s.software;
1166 * Allocate a work-queue entry for delivering software-initiated
1167 * event notifications.
1168 * The application data is copied into the work-queue entry,
1169 * if the space is sufficient.
1171 cvmx_wqe_t *cvmx_wqe_soft_create(void *data_p, unsigned int data_sz);
1173 /* Errata (PKI-20776) PKI_BUFLINK_S's are endian-swapped
1174 * CN78XX pass 1.x has a bug where the packet pointer in each segment is
1175 * written in the opposite endianness of the configured mode. Fix these here.
/* Walks the segment chain and byte-swaps each next-pointer in place
 * via memcpy + __builtin_bswap64; idempotent thanks to the
 * pki_errata20776 flag (word1 reserved bit) set once done. */
1177 static inline void cvmx_wqe_pki_errata_20776(cvmx_wqe_t *work)
1179 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1181 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && !wqe->pki_errata20776) {
1183 cvmx_buf_ptr_pki_t buffer_next;
1185 bufs = wqe->word0.bufs;
1186 buffer_next = wqe->packet_ptr;
1188 cvmx_buf_ptr_pki_t next;
1189 void *nextaddr = cvmx_phys_to_ptr(buffer_next.addr - 8);
1191 memcpy(&next, nextaddr, sizeof(next));
1192 next.u64 = __builtin_bswap64(next.u64);
1193 memcpy(nextaddr, &next, sizeof(next));
1197 wqe->pki_errata20776 = 1;
1204 * Extract the native PKI-specific buffer pointer from WQE.
1206 * NOTE: Provisional, may be superseded.
/* Applies the PKI-20776 errata fix-up before handing out the pointer;
 * returns a zeroed pointer on non-CN78XX models. */
1208 static inline cvmx_buf_ptr_pki_t cvmx_wqe_get_pki_pkt_ptr(cvmx_wqe_t *work)
1210 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1212 if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1213 cvmx_buf_ptr_pki_t x = { 0 };
1217 cvmx_wqe_pki_errata_20776(work);
1218 return wqe->packet_ptr;
1222 * Set the buffer segment count for a packet.
1224 * @return Returns the actual resulting value in the WQE field
/* The returned value is re-read from the bit-field, so callers can
 * detect truncation if 'bufs' exceeded the field width. */
1227 static inline unsigned int cvmx_wqe_set_bufs(cvmx_wqe_t *work, unsigned int bufs)
1229 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1230 work->word0.pki.bufs = bufs;
1231 return work->word0.pki.bufs;
1234 work->word2.s.bufs = bufs;
1235 return work->word2.s.bufs;
/* NOTE(review): partial dump -- leading numbers are original-file line
 * numbers; gaps in them indicate lines missing from this copy.
 * Comments only; code untouched. */
1239 * Get the offset of Layer-3 header,
1240 * only supported when Layer-3 protocol is IPv4 or IPv6.
1242 * @return Returns the offset, or 0 if the offset is not known or unsupported.
1244 * FIXME: Assuming word4 is present.
/* Mask 0x1c groups IP4/IP4_OPT/IP6/IP6_OPT into one compare. */
1246 static inline unsigned int cvmx_wqe_get_l3_offset(cvmx_wqe_t *work)
1248 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1249 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1250 /* Match 4 values: IPv4/v6 w/wo options */
1251 if ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
1252 return wqe->word4.ptr_layer_c;
1254 return work->word2.s.ip_offset;
1261 * Set the offset of Layer-3 header in a packet.
1262 * Typically used when an IP packet is generated by software
1263 * or when the Layer-2 header length is modified, and
1264 * a subsequent recalculation of checksums is anticipated.
1266 * @return Returns the actual value of the work entry offset field.
1268 * FIXME: Assuming word4 is present.
/* Only writes on CN78XX when the LC layer is IP; always returns the
 * value re-read via cvmx_wqe_get_l3_offset(). */
1270 static inline unsigned int cvmx_wqe_set_l3_offset(cvmx_wqe_t *work, unsigned int ip_off)
1272 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1273 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1274 /* Match 4 values: IPv4/v6 w/wo options */
1275 if ((wqe->word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4)
1276 wqe->word4.ptr_layer_c = ip_off;
1278 work->word2.s.ip_offset = ip_off;
1281 return cvmx_wqe_get_l3_offset(work);
1285 * Set the indication that the packet contains an IPv4 Layer-3 header.
1286 * Use 'cvmx_wqe_set_l3_ipv6()' if the protocol is IPv6.
1287 * When 'set' is false, the call will result in an indication
1288 * that the Layer-3 protocol is neither IPv4 nor IPv6.
1290 * FIXME: Add IPV4_OPT handling based on L3 header length.
1292 static inline void cvmx_wqe_set_l3_ipv4(cvmx_wqe_t *work, bool set)
1294 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1295 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1298 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_IP4;
1300 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
1302 work->word2.s.not_IP = !set;
1304 work->word2.s_cn38xx.is_v6 = 0;
1309 * Set packet Layer-3 protocol to IPv6.
1311 * FIXME: Add IPV6_OPT handling based on presence of extended headers.
1313 static inline void cvmx_wqe_set_l3_ipv6(cvmx_wqe_t *work, bool set)
1315 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1316 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1319 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_IP6;
1321 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
1323 work->word2.s_cn38xx.not_IP = !set;
1325 work->word2.s_cn38xx.is_v6 = 1;
1330 * Set a packet Layer-4 protocol type to UDP.
/* Legacy models only record tcp_or_udp, and only for IP packets. */
1332 static inline void cvmx_wqe_set_l4_udp(cvmx_wqe_t *work, bool set)
1334 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1335 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1338 wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_UDP;
1340 wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_NONE;
1342 if (!work->word2.s_cn38xx.not_IP)
1343 work->word2.s_cn38xx.tcp_or_udp = set;
1348 * Set a packet Layer-4 protocol type to TCP.
1350 static inline void cvmx_wqe_set_l4_tcp(cvmx_wqe_t *work, bool set)
1352 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1353 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1356 wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_TCP;
1358 wqe->word2.lf_hdr_type = CVMX_PKI_LTYPE_E_NONE;
1360 if (!work->word2.s_cn38xx.not_IP)
1361 work->word2.s_cn38xx.tcp_or_udp = set;
1366 * Set the "software" flag in a work entry.
1368 static inline void cvmx_wqe_set_soft(cvmx_wqe_t *work, bool set)
1370 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1371 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1373 wqe->word2.software = set;
1375 work->word2.s.software = set;
1380 * Return true if the packet is an IP fragment.
1382 static inline bool cvmx_wqe_is_l3_frag(cvmx_wqe_t *work)
1384 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1385 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1387 return (wqe->word2.is_frag != 0);
1390 if (!work->word2.s_cn38xx.not_IP)
1391 return (work->word2.s.is_frag != 0);
1397 * Set the indicator that the packet is a fragmented IP packet.
1399 static inline void cvmx_wqe_set_l3_frag(cvmx_wqe_t *work, bool set)
1401 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1402 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1404 wqe->word2.is_frag = set;
1406 if (!work->word2.s_cn38xx.not_IP)
1407 work->word2.s.is_frag = set;
1412 * Set the packet Layer-3 protocol to RARP.
1414 static inline void cvmx_wqe_set_l3_rarp(cvmx_wqe_t *work, bool set)
1416 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1417 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1420 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_RARP;
1422 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
1424 work->word2.snoip.is_rarp = set;
1429 * Set the packet Layer-3 protocol to ARP.
1431 static inline void cvmx_wqe_set_l3_arp(cvmx_wqe_t *work, bool set)
1433 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1434 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1437 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_ARP;
1439 wqe->word2.lc_hdr_type = CVMX_PKI_LTYPE_E_NONE;
1441 work->word2.snoip.is_arp = set;
1446 * Return true if the packet Layer-3 protocol is ARP.
/* Legacy models only report is_arp for non-IP packets (snoip layout). */
1448 static inline bool cvmx_wqe_is_l3_arp(cvmx_wqe_t *work)
1450 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1451 cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
1453 return (wqe->word2.lc_hdr_type == CVMX_PKI_LTYPE_E_ARP);
1456 if (work->word2.s_cn38xx.not_IP)
1457 return (work->word2.snoip.is_arp != 0);
1462 #endif /* __CVMX_WQE_H__ */