/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_ri_api.h"
#define T4_MAX_NUM_QP 65536
#define T4_MAX_NUM_CQ 65536
#define T4_MAX_NUM_PD 65536
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
#define T4_MAX_IQ_SIZE (65520 - 1)
#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
#define T4_MAX_NUM_STAG (1<<15)
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define A_PCIE_MA_SYNC 0x30b4
struct t4_status_page {
        __be32 rsvd1;   /* flit 0 - hw owns */
        __be16 rsvd2;
        __be16 qid;
        __be16 cidx;
        __be16 pidx;
        u8 qp_err;      /* flit 1 - sw owns */
        u8 db_off;
        u8 pad;
        u16 host_wq_pidx;
        u16 host_cidx;
        u16 host_pidx;
};
#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
                        sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
                        sizeof(struct fw_ri_rdma_write_wr) - \
                        sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
                        sizeof(struct fw_ri_rdma_write_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
                        sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_MAX_FR_DSGL 1024
#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
static inline int t4_max_fr_depth(int use_dsgl)
{
        return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
}
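/*
 * Illustrative check (a sketch, not part of the driver; the function
 * name and parameters are made up for this example): a caller building
 * a fastreg WR would cap its page list at t4_max_fr_depth().  With
 * DSGL enabled that is T4_MAX_FR_DSGL_DEPTH (1024 / 8 = 128 entries).
 */
static inline int t4_fr_depth_ok(int use_dsgl, int npages)
{
        return npages <= t4_max_fr_depth(use_dsgl);
}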
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
union t4_wr {
        struct fw_ri_res_wr res;
        struct fw_ri_wr ri;
        struct fw_ri_rdma_write_wr write;
        struct fw_ri_send_wr send;
        struct fw_ri_rdma_read_wr read;
        struct fw_ri_bind_mw_wr bind;
        struct fw_ri_fr_nsmr_wr fr;
        struct fw_ri_inv_lstag_wr inv;
        struct t4_status_page status;
        __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
union t4_recv_wr {
        struct fw_ri_recv_wr recv;
        struct t4_status_page status;
        __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
                               enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
        wqe->send.opcode = (u8)opcode;
        wqe->send.flags = flags;
        wqe->send.wrid = wrid;
        wqe->send.r1[0] = 0;
        wqe->send.r1[1] = 0;
        wqe->send.r1[2] = 0;
        wqe->send.len16 = len16;
}
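/*
 * Usage sketch (illustrative only; "qhp", "send_signaled" and "len16"
 * are assumptions, not names from this header).  len16 counts the full
 * WR in 16-byte units, and the WQE is built in the next free SQ slot:
 *
 *	union t4_wr *wqe = (union t4_wr *)&qhp->wq.sq.queue[qhp->wq.sq.pidx];
 *
 *	init_wr_hdr(wqe, qhp->wq.sq.pidx, FW_RI_SEND_WR,
 *		    send_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
 */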
/* CQE/AE status codes */
#define T4_ERR_SUCCESS                     0x0
#define T4_ERR_STAG                        0x1  /* STAG invalid: either the */
                                                /* STAG is off limit, is 0, */
                                                /* or STAG_key mismatch */
#define T4_ERR_PDID                        0x2  /* PDID mismatch */
#define T4_ERR_QPID                        0x3  /* QPID mismatch */
#define T4_ERR_ACCESS                      0x4  /* Invalid access right */
#define T4_ERR_WRAP                        0x5  /* Wrap error */
#define T4_ERR_BOUND                       0x6  /* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR        0x7  /* attempt to invalidate a */
                                                /* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8  /* attempt to invalidate a */
                                                /* MR with a memory window */
                                                /* still bound to it */
#define T4_ERR_ECC                         0x9  /* ECC error detected */
#define T4_ERR_ECC_PSTAG                   0xA  /* ECC error detected when */
                                                /* reading PSTAG for a MW */
#define T4_ERR_PBL_ADDR_BOUND              0xB  /* pbl addr out of bounds: */
                                                /* software error */
#define T4_ERR_SWFLUSH                     0xC  /* SW FLUSHED */
#define T4_ERR_CRC                         0x10 /* CRC error */
#define T4_ERR_MARKER                      0x11 /* Marker error */
#define T4_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
#define T4_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
#define T4_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
#define T4_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
#define T4_ERR_OPCODE                      0x16 /* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number */
#define T4_ERR_MSN                         0x18 /* MSN error */
#define T4_ERR_TBIT                        0x19 /* tag bit not set correctly */
#define T4_ERR_MO                          0x1A /* MO not 0 for TERMINATE */
                                                /* or READ_REQ */
#define T4_ERR_MSN_GAP                     0x1B
#define T4_ERR_MSN_RANGE                   0x1C
#define T4_ERR_IRD_OVERFLOW                0x1D
#define T4_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds: */
                                                /* software error */
#define T4_ERR_INTERNAL_ERR                0x1F /* internal error (opcode */
                                                /* mismatch) */
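/*
 * The CQE layout the macros below decode.  Reconstructed here from the
 * accessors that follow (header, len, u.rcqe, u.scqe, u.gen and
 * bits_type_ts); treat the exact field names as inferred rather than
 * authoritative.
 */
struct t4_cqe {
        __be32 header;
        __be32 len;
        union {
                struct {
                        __be32 stag;
                        __be32 msn;
                } rcqe;
                struct {
                        u32 nada1;
                        u16 nada2;
                        u16 cidx;
                } scqe;
                struct {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
        } u;
        __be64 reserved;
        __be64 bits_type_ts;
};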
/* macros for flit 0 of the cqe */
#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x)( \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
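/*
 * Illustrative helper (a sketch, not part of the driver; the function
 * name is made up): the accessors above byte-swap the header word once
 * and mask out individual fields, e.g. to spot a failed SQ completion.
 */
static inline int t4_cqe_is_sq_error(struct t4_cqe *cqe)
{
        /* type bit 4 set means SQ completion; status lives in bits 5..9 */
        return SQ_TYPE(cqe) && CQE_STATUS(cqe) != T4_ERR_SUCCESS;
}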
#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)

/* generic accessor macros */
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)

#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
        return pgprot_writecombine(prot);
#else
        return pgprot_noncached(prot);
#endif
}
enum {
        T4_SQ_ONCHIP = (1<<0),
};
struct t4_sq {
        union t4_wr *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        unsigned long phys_addr;
        struct t4_swsqe *sw_sq;
        struct t4_swsqe *oldest_read;
        u64 __iomem *udb;
        size_t memsize;
        u32 qid;
        u16 in_use;
        u16 size;
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
        u16 flags;
        short flush_cidx;
};

struct t4_rq {
        union t4_recv_wr *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        struct t4_swrqe *sw_rq;
        u64 __iomem *udb;
        size_t memsize;
        u32 qid;
        u32 msn;
        u16 in_use;
        u16 size;
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
};

struct t4_wq {
        struct t4_sq sq;
        struct t4_rq rq;
        void __iomem *db;
        void __iomem *gts;
        struct c4iw_rdev *rdev;
        int flushed;
};
static inline int t4_rqes_posted(struct t4_wq *wq)
{
        return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
        return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
        return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
        return wq->rq.size - 1 - wq->rq.in_use;
}
static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
        wq->rq.in_use++;
        if (++wq->rq.pidx == wq->rq.size)
                wq->rq.pidx = 0;
        wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
        if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
                wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}
static inline void t4_rq_consume(struct t4_wq *wq)
{
        wq->rq.in_use--;
        wq->rq.msn++;
        if (++wq->rq.cidx == wq->rq.size)
                wq->rq.cidx = 0;
}
static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
        return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
        return wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline int t4_sq_onchip(struct t4_sq *sq)
{
        return sq->flags & T4_SQ_ONCHIP;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
        return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
        return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
        return wq->sq.size - 1 - wq->sq.in_use;
}
static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
        wq->sq.in_use++;
        if (++wq->sq.pidx == wq->sq.size)
                wq->sq.pidx = 0;
        wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
        if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
                wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}
static inline void t4_sq_consume(struct t4_wq *wq)
{
        BUG_ON(wq->sq.in_use < 1);
        if (wq->sq.cidx == wq->sq.flush_cidx)
                wq->sq.flush_cidx = -1;
        wq->sq.in_use--;
        if (++wq->sq.cidx == wq->sq.size)
                wq->sq.cidx = 0;
}
static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
        return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
        return wq->sq.size * T4_SQ_NUM_SLOTS;
}
/* This function copies a 64-byte coalesced work request to memory
 * mapped BAR2 space. For coalesced WRs, the SGE fetches data
 * from the FIFO instead of from the host.
 */
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{
        int count = 8;

        while (count) {
                writeq(*src, dst);
                src++;
                dst++;
                count--;
        }
}
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
                                 union t4_wr *wqe)
{
        /* Flush host queue memory writes. */
        wmb();
        if (t5) {
                if (inc == 1 && wqe) {
                        PDBG("%s: WC wq->sq.pidx = %d\n",
                             __func__, wq->sq.pidx);
                        pio_copy(wq->sq.udb + 7, (void *)wqe);
                } else {
                        PDBG("%s: DB wq->sq.pidx = %d\n",
                             __func__, wq->sq.pidx);
                        writel(PIDX_T5(inc), wq->sq.udb);
                }

                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
        writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
                                 union t4_recv_wr *wqe)
{
        /* Flush host queue memory writes. */
        wmb();
        if (t5) {
                if (inc == 1 && wqe) {
                        PDBG("%s: WC wq->rq.pidx = %d\n",
                             __func__, wq->rq.pidx);
                        pio_copy(wq->rq.udb + 7, (void *)wqe);
                } else {
                        PDBG("%s: DB wq->rq.pidx = %d\n",
                             __func__, wq->rq.pidx);
                        writel(PIDX_T5(inc), wq->rq.udb);
                }

                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
        writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}
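/*
 * Posting-flow sketch (illustrative; "qhp", "len16" and "t5" are
 * assumptions, not names from this header).  A caller builds the WR in
 * the next free slot, accounts for it with t4_sq_produce(), and then
 * makes it visible to the SGE with t4_ring_sq_db(); on T5 a single WR
 * can be pushed through the write-combined BAR2 path:
 *
 *	init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
 *	t4_sq_produce(&qhp->wq, len16);
 *	idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
 *	t4_ring_sq_db(&qhp->wq, idx, t5, wqe);
 */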
static inline int t4_wq_in_error(struct t4_wq *wq)
{
        return wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
        wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
        wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
        wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
        return !wq->rq.queue[wq->rq.size].status.db_off;
}
struct t4_cq {
        struct t4_cqe *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        struct t4_cqe *sw_queue;
        void __iomem *gts;
        struct c4iw_rdev *rdev;
        size_t memsize;
        __be64 bits_type_ts;
        u32 cqid;
        u16 size; /* including status page */
        u16 cidx;
        u16 sw_pidx;
        u16 sw_cidx;
        u16 sw_in_use;
        u16 cidx_inc;
        u8 gen;
        u8 error;
};
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
        u32 val;

        while (cq->cidx_inc > CIDXINC_MASK) {
                val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
                      INGRESSQID(cq->cqid);
                writel(val, cq->gts);
                cq->cidx_inc -= CIDXINC_MASK;
        }
        val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
              INGRESSQID(cq->cqid);
        writel(val, cq->gts);
        cq->cidx_inc = 0;
        return 0;
}
static inline void t4_swcq_produce(struct t4_cq *cq)
{
        cq->sw_in_use++;
        if (cq->sw_in_use == cq->size) {
                PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
                cq->error = 1;
                BUG_ON(1);
        }
        if (++cq->sw_pidx == cq->size)
                cq->sw_pidx = 0;
}
static inline void t4_swcq_consume(struct t4_cq *cq)
{
        BUG_ON(cq->sw_in_use < 1);
        cq->sw_in_use--;
        if (++cq->sw_cidx == cq->size)
                cq->sw_cidx = 0;
}
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
        cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
        if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
                u32 val;

                val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
                      INGRESSQID(cq->cqid);
                writel(val, cq->gts);
                cq->cidx_inc = 0;
        }
        if (++cq->cidx == cq->size) {
                cq->cidx = 0;
                cq->gen ^= 1;
        }
}
static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
        return (CQE_GENBIT(cqe) == cq->gen);
}
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
        int ret;
        u16 prev_cidx;

        if (cq->cidx == 0)
                prev_cidx = cq->size - 1;
        else
                prev_cidx = cq->cidx - 1;

        if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
                ret = -EOVERFLOW;
                cq->error = 1;
                printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
                BUG_ON(1);
        } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {

                /* Ensure CQE is flushed to memory */
                rmb();
                *cqe = &cq->queue[cq->cidx];
                ret = 0;
        } else
                ret = -ENODATA;
        return ret;
}
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
        if (cq->sw_in_use == cq->size) {
                PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
                cq->error = 1;
                BUG_ON(1);
                return NULL;
        }
        if (cq->sw_in_use)
                return &cq->sw_queue[cq->sw_cidx];
        return NULL;
}
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
        int ret = 0;

        if (cq->error)
                ret = -ENODATA;
        else if (cq->sw_in_use)
                *cqe = &cq->sw_queue[cq->sw_cidx];
        else
                ret = t4_next_hw_cqe(cq, cqe);
        return ret;
}
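/*
 * Poll-loop sketch (illustrative only, not driver code).  t4_next_cqe()
 * prefers software CQEs (queued when a QP is flushed) over hardware
 * CQEs; whichever kind was returned must be consumed from the matching
 * queue:
 *
 *	struct t4_cqe *cqe;
 *
 *	while (!t4_next_cqe(cq, &cqe)) {
 *		... process CQE_OPCODE(cqe), CQE_STATUS(cqe) ...
 *		if (SW_CQE(cqe))
 *			t4_swcq_consume(cq);
 *		else
 *			t4_hwcq_consume(cq);
 *	}
 */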
static inline int t4_cq_in_error(struct t4_cq *cq)
{
        return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
        ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
struct t4_dev_status_page {
        u8 db_off;
};

#endif