drivers/net/ethernet/qlogic/qed/qed_iwarp.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_ooo.h"

#define QED_IWARP_ORD_DEFAULT           32
#define QED_IWARP_IRD_DEFAULT           32
#define QED_IWARP_MAX_FW_MSS            4120

#define QED_EP_SIG 0xecabcdef

struct mpa_v2_hdr {
        __be16 ird;
        __be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL  0x8000
#define MPA_V2_SEND_RTR         0x4000  /* on ird */
#define MPA_V2_READ_RTR         0x4000  /* on ord */
#define MPA_V2_WRITE_RTR        0x8000
#define MPA_V2_IRD_ORD_MASK     0x3FFF

#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)

#define QED_IWARP_INVALID_TCP_CID       0xffffffff

#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)

#define QED_IWARP_RCV_WND_SIZE_MIN      (0xffff)
#define TIMESTAMP_HEADER_SIZE           (12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT    (2)

#define QED_IWARP_TS_EN                 BIT(0)
#define QED_IWARP_DA_EN                 BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED      (1)
#define QED_IWARP_PARAM_P2P             (1)

#define QED_IWARP_DEF_MAX_RT_TIME       (0)
#define QED_IWARP_DEF_CWND_FACTOR       (4)
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT  (5)
#define QED_IWARP_DEF_KA_TIMEOUT        (1200000)       /* 20 min */
#define QED_IWARP_DEF_KA_INTERVAL       (1000)          /* 1 sec */

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
                                 __le16 echo, union event_ring_data *data,
                                 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
        dev->max_qp = min_t(u32,
                            IWARP_MAX_QPS,
                            p_hwfn->p_rdma_info->num_qps) -
                      QED_IWARP_PREALLOC_CNT;

        dev->max_cq = dev->max_qp;

        dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
        dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

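/* Enable TCP search in the parser (PRS) and record that RDMA is enabled
 * there; iWARP runs on top of TCP.
 */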
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
        p_hwfn->b_rdma_enabled_in_prs = true;
}

/* We have two cid maps: one for tcp, which should be used only for passive
 * syn processing and for replacing a pre-allocated ep in the list; the
 * second for active tcp connections and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
        cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        if (cid < QED_IWARP_PREALLOC_CNT)
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
                                    cid);
        else
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
                         struct iwarp_init_func_ramrod_data *p_ramrod)
{
        p_ramrod->iwarp.ll2_ooo_q_index =
            RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
            p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;

        p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}

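/* Allocate a cid from the main map and dynamically allocate an ilt line
 * for it. Used for active connections and for QPs; passive syn processing
 * draws from the preallocated tcp_cid map instead.
 */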
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
                return rc;
        }
        *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
        if (rc)
                qed_iwarp_cid_cleaned(p_hwfn, *cid);

        return rc;
}

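/* Mark a preallocated cid as taken in the tcp cid map. */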
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
        cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* This function allocates a cid for passive tcp (called from syn receive).
 * The reason it's separate from the regular cid allocation is that these
 * cids are assured to already have ilt allocated: they are preallocated
 * to ensure that no memory allocation is needed during syn processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->tcp_cid_map, cid);

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "can't allocate iwarp tcp cid max-count=%d\n",
                           p_hwfn->p_rdma_info->tcp_cid_map.max_count);

                *cid = QED_IWARP_INVALID_TCP_CID;
                return rc;
        }

        *cid += qed_cxt_get_proto_cid_start(p_hwfn,
                                            p_hwfn->p_rdma_info->proto);
        return 0;
}

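/* Allocate the shared queue page and a cid for the QP, then post the
 * CREATE_QP ramrod to the FW.
 */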
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
                        struct qed_rdma_qp *qp,
                        struct qed_rdma_create_qp_out_params *out_params)
{
        struct iwarp_create_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u16 physical_queue;
        u32 cid;
        int rc;

        qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                              IWARP_SHARED_QUEUE_PAGE_SIZE,
                                              &qp->shared_queue_phys_addr,
                                              GFP_KERNEL);
        if (!qp->shared_queue)
                return -ENOMEM;

        out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
            IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
            IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
            IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
        out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
            IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
        if (rc)
                goto err1;

        qp->icid = (u16)cid;

        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.cid = qp->icid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_CREATE_QP,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                goto err2;

        p_ramrod = &p_ent->ramrod.iwarp_create_qp;

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

        p_ramrod->pd = cpu_to_le16(qp->pd);
        p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
        p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);

        p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
        p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
        p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;

        p_ramrod->cq_cid_for_sq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
        p_ramrod->cq_cid_for_rq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
        p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err2;

        return rc;

err2:
        qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          IWARP_SHARED_QUEUE_PAGE_SIZE,
                          qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

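/* Post a MODIFY_QP ramrod moving the FW QP to CLOSING or ERROR, depending
 * on the current SW iwarp state.
 */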
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct iwarp_modify_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u16 flags, trans_to_state;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MODIFY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.iwarp_modify_qp;

        flags = le16_to_cpu(p_ramrod->flags);
        SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
        p_ramrod->flags = cpu_to_le16(flags);

        if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
                trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
        else
                trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;

        p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

        return rc;
}

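/* Map the common RoCE QP states onto the smaller iWARP state machine. */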
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
        switch (state) {
        case QED_ROCE_QP_STATE_RESET:
        case QED_ROCE_QP_STATE_INIT:
        case QED_ROCE_QP_STATE_RTR:
                return QED_IWARP_QP_STATE_IDLE;
        case QED_ROCE_QP_STATE_RTS:
                return QED_IWARP_QP_STATE_RTS;
        case QED_ROCE_QP_STATE_SQD:
                return QED_IWARP_QP_STATE_CLOSING;
        case QED_ROCE_QP_STATE_ERR:
                return QED_IWARP_QP_STATE_ERROR;
        case QED_ROCE_QP_STATE_SQE:
                return QED_IWARP_QP_STATE_TERMINATE;
        default:
                return QED_IWARP_QP_STATE_ERROR;
        }
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
        switch (state) {
        case QED_IWARP_QP_STATE_IDLE:
                return QED_ROCE_QP_STATE_INIT;
        case QED_IWARP_QP_STATE_RTS:
                return QED_ROCE_QP_STATE_RTS;
        case QED_IWARP_QP_STATE_TERMINATE:
                return QED_ROCE_QP_STATE_SQE;
        case QED_IWARP_QP_STATE_CLOSING:
                return QED_ROCE_QP_STATE_SQD;
        case QED_IWARP_QP_STATE_ERROR:
                return QED_ROCE_QP_STATE_ERR;
        default:
                return QED_ROCE_QP_STATE_ERR;
        }
}

static const char * const iwarp_state_names[] = {
        "IDLE",
        "RTS",
        "TERMINATE",
        "CLOSING",
        "ERROR",
};

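/* Software state machine: apply the requested state transition and decide
 * whether the FW must be informed via a MODIFY_QP ramrod (only for
 * non-internal transitions to CLOSING or ERROR).
 */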
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
                    struct qed_rdma_qp *qp,
                    enum qed_iwarp_qp_state new_state, bool internal)
{
        enum qed_iwarp_qp_state prev_iw_state;
        bool modify_fw = false;
        int rc = 0;

        /* Modify QP can be called from the upper layer or as a result of an
         * async RST/FIN; the lock protects against concurrent transitions.
         */
        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
        prev_iw_state = qp->iwarp_state;

        if (prev_iw_state == new_state) {
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
                return 0;
        }

        switch (prev_iw_state) {
        case QED_IWARP_QP_STATE_IDLE:
                switch (new_state) {
                case QED_IWARP_QP_STATE_RTS:
                        qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        if (!internal)
                                modify_fw = true;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_RTS:
                switch (new_state) {
                case QED_IWARP_QP_STATE_CLOSING:
                        if (!internal)
                                modify_fw = true;

                        qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        if (!internal)
                                modify_fw = true;
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_ERROR:
                switch (new_state) {
                case QED_IWARP_QP_STATE_IDLE:
                        qp->iwarp_state = new_state;
                        break;
                case QED_IWARP_QP_STATE_CLOSING:
                        /* Could happen due to a race; do nothing. */
                        break;
                default:
                        rc = -EINVAL;
                }
                break;
        case QED_IWARP_QP_STATE_TERMINATE:
        case QED_IWARP_QP_STATE_CLOSING:
                qp->iwarp_state = new_state;
                break;
        default:
                break;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
                   qp->icid,
                   iwarp_state_names[prev_iw_state],
                   iwarp_state_names[qp->iwarp_state],
                   internal ? " internal" : "");

        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

        if (modify_fw)
                rc = qed_iwarp_modify_fw(p_hwfn, qp);

        return rc;
}

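/* Post a DESTROY_QP ramrod for the given QP and wait for its completion. */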
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_DESTROY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

        return rc;
}

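/* Free the ep's DMA buffer, optionally unlink it from the active ep list,
 * and release the object.
 */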
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
                                 struct qed_iwarp_ep *ep,
                                 bool remove_from_active_list)
{
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(*ep->ep_buffer_virt),
                          ep->ep_buffer_virt, ep->ep_buffer_phys);

        if (remove_from_active_list) {
                spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                list_del(&ep->list_entry);
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        }

        if (ep->qp)
                ep->qp->ep = NULL;

        kfree(ep);
}

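/* Move the QP to ERROR if necessary, wait for its ep to close (up to
 * 200 * 100ms), then destroy the FW QP and free the shared queue page.
 */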
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct qed_iwarp_ep *ep = qp->ep;
        int wait_count = 0;
        int rc = 0;

        if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
                rc = qed_iwarp_modify_qp(p_hwfn, qp,
                                         QED_IWARP_QP_STATE_ERROR, false);
                if (rc)
                        return rc;
        }

        /* Make sure ep is closed before returning and freeing memory. */
        if (ep) {
                while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
                       wait_count++ < 200)
                        msleep(100);

                if (ep->state != QED_IWARP_EP_CLOSED)
                        DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
                                  ep->state);

                qed_iwarp_destroy_ep(p_hwfn, ep, false);
        }

        rc = qed_iwarp_fw_destroy(p_hwfn, qp);

        if (qp->shared_queue)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  IWARP_SHARED_QUEUE_PAGE_SIZE,
                                  qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

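/* Allocate an ep object together with the DMA-coherent buffer used to
 * exchange private data and async output with the FW.
 */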
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
        struct qed_iwarp_ep *ep;
        int rc;

        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                return -ENOMEM;

        ep->state = QED_IWARP_EP_INIT;

        ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                sizeof(*ep->ep_buffer_virt),
                                                &ep->ep_buffer_phys,
                                                GFP_KERNEL);
        if (!ep->ep_buffer_virt) {
                rc = -ENOMEM;
                goto err;
        }

        ep->sig = QED_EP_SIG;

        *ep_out = ep;

        return 0;

err:
        kfree(ep);
        return rc;
}

static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
                           struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
                   p_tcp_ramrod->tcp.local_mac_addr_lo,
                   p_tcp_ramrod->tcp.local_mac_addr_mid,
                   p_tcp_ramrod->tcp.local_mac_addr_hi,
                   p_tcp_ramrod->tcp.remote_mac_addr_lo,
                   p_tcp_ramrod->tcp.remote_mac_addr_mid,
                   p_tcp_ramrod->tcp.remote_mac_addr_hi);

        if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
                           p_tcp_ramrod->tcp.local_ip,
                           p_tcp_ramrod->tcp.local_port,
                           p_tcp_ramrod->tcp.remote_ip,
                           p_tcp_ramrod->tcp.remote_port,
                           p_tcp_ramrod->tcp.vlan_id);
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
                           p_tcp_ramrod->tcp.local_ip,
                           p_tcp_ramrod->tcp.local_port,
                           p_tcp_ramrod->tcp.remote_ip,
                           p_tcp_ramrod->tcp.remote_port,
                           p_tcp_ramrod->tcp.vlan_id);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
                   p_tcp_ramrod->tcp.flow_label,
                   p_tcp_ramrod->tcp.ttl,
                   p_tcp_ramrod->tcp.tos_or_tc,
                   p_tcp_ramrod->tcp.mss,
                   p_tcp_ramrod->tcp.rcv_wnd_scale,
                   p_tcp_ramrod->tcp.connect_mode,
                   p_tcp_ramrod->tcp.flags);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
                   p_tcp_ramrod->tcp.syn_ip_payload_length,
                   p_tcp_ramrod->tcp.syn_phy_addr_lo,
                   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}

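/* Build and post the TCP_OFFLOAD ramrod that hands an (active or passive)
 * TCP connection over to the FW.
 */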
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
        struct tcp_offload_params_opt2 *tcp;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t async_output_phys;
        dma_addr_t in_pdata_phys;
        u16 physical_q;
        u16 flags = 0;
        u8 tcp_flags;
        int rc;
        int i;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = ep->tcp_cid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        if (ep->connect_mode == TCP_CONNECT_PASSIVE)
                init_data.comp_mode = QED_SPQ_MODE_CB;
        else
                init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                return rc;

        p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

        in_pdata_phys = ep->ep_buffer_phys +
                        offsetof(struct qed_iwarp_ep_memory, in_pdata);
        DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
                       in_pdata_phys);

        p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
            cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

        async_output_phys = ep->ep_buffer_phys +
                            offsetof(struct qed_iwarp_ep_memory, async_output);
        DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
                       async_output_phys);

        p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
        p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
        p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
        p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

        tcp = &p_tcp_ramrod->tcp;
        qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
                            &tcp->remote_mac_addr_mid,
                            &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
        qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
                            &tcp->local_mac_addr_lo, ep->local_mac_addr);

        tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

        tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;

        SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
                  !!(tcp_flags & QED_IWARP_TS_EN));

        SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
                  !!(tcp_flags & QED_IWARP_DA_EN));

        tcp->flags = cpu_to_le16(flags);
        tcp->ip_version = ep->cm_info.ip_version;

        for (i = 0; i < 4; i++) {
                tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
                tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
        }

        tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
        tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
        tcp->mss = cpu_to_le16(ep->mss);
        tcp->flow_label = 0;
        tcp->ttl = 0x40;
        tcp->tos_or_tc = 0;

        tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
        tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
        tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
        tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
        tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);

        tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
        tcp->connect_mode = ep->connect_mode;

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                tcp->syn_ip_payload_length =
                        cpu_to_le16(ep->syn_ip_payload_length);
                tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
                tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
        }

        qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

        return rc;
}

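/* Handle an incoming MPA request on the passive side: parse the optional
 * MPA v2 header, clamp the requested ord/ird, resolve the RTR type, strip
 * the header from the private data and deliver an MPA_REQUEST event to the
 * upper layer.
 */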
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        struct qed_iwarp_cm_event_params params;
        struct mpa_v2_hdr *mpa_v2;
        union async_output *async_data;
        u16 mpa_ord, mpa_ird;
        u8 mpa_hdr_size = 0;
        u16 ulp_data_len;
        u8 mpa_rev;

        async_data = &ep->ep_buffer_virt->async_output;

        mpa_rev = async_data->mpa_request.mpa_handshake_mode;
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
                   async_data->mpa_request.ulp_data_len,
                   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

        if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
                /* Read ord/ird values from private data buffer */
                mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
                mpa_hdr_size = sizeof(*mpa_v2);

                mpa_ord = ntohs(mpa_v2->ord);
                mpa_ird = ntohs(mpa_v2->ird);

                /* Temporarily store the requested incoming ord/ird in
                 * cm_info; they are replaced with the negotiated values
                 * during accept.
                 */
                ep->cm_info.ord = (u8)min_t(u16,
                                            (mpa_ord & MPA_V2_IRD_ORD_MASK),
                                            QED_IWARP_ORD_DEFAULT);

                ep->cm_info.ird = (u8)min_t(u16,
                                            (mpa_ird & MPA_V2_IRD_ORD_MASK),
                                            QED_IWARP_IRD_DEFAULT);

                /* Peer2Peer negotiation */
                ep->rtr_type = MPA_RTR_TYPE_NONE;
                if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
                        if (mpa_ord & MPA_V2_WRITE_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

                        if (mpa_ord & MPA_V2_READ_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

                        if (mpa_ird & MPA_V2_SEND_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

                        ep->rtr_type &= iwarp_info->rtr_type;

                        /* if we're left with no match send our capabilities */
                        if (ep->rtr_type == MPA_RTR_TYPE_NONE)
                                ep->rtr_type = iwarp_info->rtr_type;
                }

                ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
        } else {
                ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
                ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
                ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
                   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
                   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

        /* Strip mpa v2 hdr from private data before sending to upper layer */
        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

        ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
        ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;

        params.event = QED_IWARP_EVENT_MPA_REQUEST;
        params.cm_info = &ep->cm_info;
        params.ep_context = ep;
        params.status = 0;

        ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
        ep->event_cb(ep->cb_context, &params);
}

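/* Build and post the MPA_OFFLOAD ramrod, which hands the MPA
 * request/reply/reject exchange over to the FW.
 */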
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
        struct mpa_outgoing_params *common;
        struct qed_iwarp_info *iwarp_info;
        struct qed_sp_init_data init_data;
        dma_addr_t async_output_phys;
        struct qed_spq_entry *p_ent;
        dma_addr_t out_pdata_phys;
        dma_addr_t in_pdata_phys;
        struct qed_rdma_qp *qp;
        bool reject;
        u32 val;
        int rc;

        if (!ep)
                return -EINVAL;

        qp = ep->qp;
        reject = !qp;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = reject ? ep->tcp_cid : qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE)
                init_data.comp_mode = QED_SPQ_MODE_CB;
        else
                init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                return rc;

        p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
        common = &p_mpa_ramrod->common;

        out_pdata_phys = ep->ep_buffer_phys +
                         offsetof(struct qed_iwarp_ep_memory, out_pdata);
        DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);

        val = ep->cm_info.private_data_len;
        common->outgoing_ulp_buffer.len = cpu_to_le16(val);
        common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

        common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
        common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);

        val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
        p_mpa_ramrod->tcp_cid = cpu_to_le32(val);

        in_pdata_phys = ep->ep_buffer_phys +
                        offsetof(struct qed_iwarp_ep_memory, in_pdata);
        p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
        DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
                       in_pdata_phys);
        p_mpa_ramrod->incoming_ulp_buffer.len =
            cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
        async_output_phys = ep->ep_buffer_phys +
                            offsetof(struct qed_iwarp_ep_memory, async_output);
        DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
                       async_output_phys);
        p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
        p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

        if (!reject) {
                DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
                               qp->shared_queue_phys_addr);
                p_mpa_ramrod->stats_counter_id =
                    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
        } else {
                common->reject = 1;
        }

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
        p_mpa_ramrod->mode = ep->mpa_rev;
        SET_FIELD(p_mpa_ramrod->rtr_pref,
                  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

        ep->state = QED_IWARP_EP_MPA_OFFLOADED;
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (!reject)
                ep->cid = qp->icid;     /* Now they're migrated. */

        DP_VERBOSE(p_hwfn,
                   QED_MSG_RDMA,
                   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
                   reject ? 0xffff : qp->icid,
                   ep->tcp_cid,
                   rc,
                   ep->cm_info.ird,
                   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
        return rc;
}

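/* Reset an ep and put it back on the free list for reuse by a future
 * passive connection.
 */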
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        ep->state = QED_IWARP_EP_INIT;
        if (ep->qp)
                ep->qp->ep = NULL;
        ep->qp = NULL;
        memset(&ep->cm_info, 0, sizeof(ep->cm_info));

        if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
                /* We don't care about the return code; it's ok if tcp_cid
                 * remains invalid, in that case we'll defer allocation.
                 */
                qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
        }
        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        list_move_tail(&ep->list_entry,
                       &p_hwfn->p_rdma_info->iwarp.ep_free_list);

        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}

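/* Extract the negotiated ord/ird from the MPA v2 header (if MPA rev 2 is
 * in use) and point cm_info at the private data that follows it.
 */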
static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct mpa_v2_hdr *mpa_v2_params;
        union async_output *async_data;
        u16 mpa_ird, mpa_ord;
        u8 mpa_data_size = 0;
        u16 ulp_data_len;

        if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
                mpa_v2_params =
                        (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
                mpa_data_size = sizeof(*mpa_v2_params);
                mpa_ird = ntohs(mpa_v2_params->ird);
                mpa_ord = ntohs(mpa_v2_params->ord);

                ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
                ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
        }

        async_data = &ep->ep_buffer_virt->async_output;
        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;

        ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
        ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
}

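/* Active side only: the FW received the peer's MPA reply; parse it and
 * deliver an ACTIVE_MPA_REPLY event to the upper layer.
 */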
static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_cm_event_params params;

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                DP_NOTICE(p_hwfn,
                          "MPA reply event not expected on passive side!\n");
                return;
        }

        params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

        qed_iwarp_parse_private_data(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
                   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

        params.cm_info = &ep->cm_info;
        params.ep_context = ep;
        params.status = 0;

        ep->mpa_reply_processed = true;

        ep->event_cb(ep->cb_context, &params);
}

#define QED_IWARP_CONNECT_MODE_STRING(ep) \
        ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"

/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
                       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
        struct qed_iwarp_cm_event_params params;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE)
                params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
        else
                params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
                qed_iwarp_parse_private_data(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
                   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

        params.cm_info = &ep->cm_info;

        params.ep_context = ep;

        switch (fw_return_code) {
        case RDMA_RETURN_OK:
                ep->qp->max_rd_atomic_req = ep->cm_info.ord;
                ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
                qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS,
                                    true);
                ep->state = QED_IWARP_EP_ESTABLISHED;
                params.status = 0;
                break;
        case IWARP_CONN_ERROR_MPA_TIMEOUT:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -EBUSY;
                break;
        case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_RST:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
                          ep->tcp_cid);
                params.status = -ECONNRESET;
                break;
        case IWARP_CONN_ERROR_MPA_FIN:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_INSUF_IRD:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_TERMINATE:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        default:
                params.status = -ECONNRESET;
                break;
        }

        if (fw_return_code != RDMA_RETURN_OK)
                /* paired with READ_ONCE in destroy_qp */
                smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

        ep->event_cb(ep->cb_context, &params);

        /* On the passive side, if there is no associated QP (i.e. the
         * connection was rejected) the ep must be returned to the pool;
         * in the regular case an element is added back in accept instead.
         * In both cases the ep must be removed from the ep_list.
         */
        if (fw_return_code != RDMA_RETURN_OK) {
                ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
                if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
                    (!ep->qp)) {        /* Rejected */
                        qed_iwarp_return_ep(p_hwfn, ep);
                } else {
                        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                        list_del(&ep->list_entry);
                        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                }
        }
}

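/* Write the MPA v2 header (ird/ord plus peer2peer/RTR bits) at the start
 * of the outgoing private data buffer.
 */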
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
                             struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
        struct mpa_v2_hdr *mpa_v2_params;
        u16 mpa_ird, mpa_ord;

        *mpa_data_size = 0;
        if (MPA_REV2(ep->mpa_rev)) {
                mpa_v2_params =
                    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
                *mpa_data_size = sizeof(*mpa_v2_params);

                mpa_ird = (u16)ep->cm_info.ird;
                mpa_ord = (u16)ep->cm_info.ord;

                if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
                        mpa_ird |= MPA_V2_PEER2PEER_MODEL;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
                                mpa_ird |= MPA_V2_SEND_RTR;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
                                mpa_ord |= MPA_V2_WRITE_RTR;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
                                mpa_ord |= MPA_V2_READ_RTR;
                }

                mpa_v2_params->ird = htons(mpa_ird);
                mpa_v2_params->ord = htons(mpa_ord);

                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
                           mpa_v2_params->ird,
                           mpa_v2_params->ord,
                           *((u32 *)mpa_v2_params),
                           mpa_ord & MPA_V2_IRD_ORD_MASK,
                           mpa_ird & MPA_V2_IRD_ORD_MASK,
                           !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
                           !!(mpa_ird & MPA_V2_SEND_RTR),
                           !!(mpa_ord & MPA_V2_WRITE_RTR),
                           !!(mpa_ord & MPA_V2_READ_RTR));
        }
}

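/* Active-side connect: allocate a cid and an ep, prepare the outgoing MPA
 * private data and post the TCP offload ramrod.
 */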
int qed_iwarp_connect(void *rdma_cxt,
                      struct qed_iwarp_connect_in *iparams,
                      struct qed_iwarp_connect_out *oparams)
{
        struct qed_hwfn *p_hwfn = rdma_cxt;
        struct qed_iwarp_info *iwarp_info;
        struct qed_iwarp_ep *ep;
        u8 mpa_data_size = 0;
        u32 cid;
        int rc;

        if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
            (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
                DP_NOTICE(p_hwfn,
                          "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
                          iparams->qp->icid, iparams->cm_info.ord,
                          iparams->cm_info.ird);

                return -EINVAL;
        }

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;

        /* Allocate ep object */
        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
        if (rc)
                return rc;

        rc = qed_iwarp_create_ep(p_hwfn, &ep);
        if (rc)
                goto err;

        ep->tcp_cid = cid;

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        ep->qp = iparams->qp;
        ep->qp->ep = ep;
        ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
        ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
        memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

        ep->cm_info.ord = iparams->cm_info.ord;
        ep->cm_info.ird = iparams->cm_info.ird;

        ep->rtr_type = iwarp_info->rtr_type;
        if (!iwarp_info->peer2peer)
                ep->rtr_type = MPA_RTR_TYPE_NONE;

        if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
                ep->cm_info.ord = 1;

        ep->mpa_rev = iwarp_info->mpa_rev;

        qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

        ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
        ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
                                       mpa_data_size;

        memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
               iparams->cm_info.private_data,
               iparams->cm_info.private_data_len);

        ep->mss = iparams->mss;
        ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

        ep->event_cb = iparams->event_cb;
        ep->cb_context = iparams->cb_context;
        ep->connect_mode = TCP_CONNECT_ACTIVE;

        oparams->ep_context = ep;

        rc = qed_iwarp_tcp_offload(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
                   iparams->qp->icid, ep->tcp_cid, rc);

        if (rc) {
                qed_iwarp_destroy_ep(p_hwfn, ep, true);
                goto err;
        }

        return rc;
err:
        qed_iwarp_cid_cleaned(p_hwfn, cid);

        return rc;
}

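/* Take an ep off the free list, retrying a deferred tcp cid allocation if
 * needed.
 */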
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
        struct qed_iwarp_ep *ep = NULL;
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
                DP_ERR(p_hwfn, "Ep list is empty\n");
                goto out;
        }

        ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
                              struct qed_iwarp_ep, list_entry);

        /* In some cases allocating a tcp cid failed when the ep was added
         * back from accept / a failure path; retry now. This is not the
         * common case.
         */
        if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
                rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

                /* If we fail we could look for another entry with a valid
                 * tcp_cid, but since we don't expect to reach this anyway
                 * it's not worth the handling.
                 */
                if (rc) {
                        ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
                        ep = NULL;
                        goto out;
                }
        }

        list_del(&ep->list_entry);

out:
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        return ep;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared; as long as
 * there is progress (i.e. the number of bits left to be cleared decreases)
 * it keeps waiting.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
        int prev_weight = 0;
        int wait_count = 0;
        int weight = 0;

        weight = bitmap_weight(bmap->bitmap, bmap->max_count);
        prev_weight = weight;

        while (weight) {
                /* If the HW device is in recovery, all resources are
                 * immediately reset without receiving a per-cid indication
                 * from HW. In this case we don't expect the cid_map to be
                 * cleared.
                 */
                if (p_hwfn->cdev->recov_in_prog)
                        return 0;

                msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

                weight = bitmap_weight(bmap->bitmap, bmap->max_count);

                if (prev_weight == weight) {
                        wait_count++;
                } else {
                        prev_weight = weight;
                        wait_count = 0;
                }

                if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
                        DP_NOTICE(p_hwfn,
                                  "%s bitmap wait timed out (%d cids pending)\n",
                                  bmap->name, weight);
                        return -EBUSY;
                }
        }
        return 0;
}

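/* Wait for the preallocated tcp cids to drain, release them from the main
 * cid map, then wait for the main map to drain as well.
 */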
static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
        int rc;
        int i;

        rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
                                            &p_hwfn->p_rdma_info->tcp_cid_map);
        if (rc)
                return rc;

        /* Now free the tcp cids from the main cid map */
        for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);

        /* Now wait for all cids to be completed */
        return qed_iwarp_wait_cid_map_cleared(p_hwfn,
                                              &p_hwfn->p_rdma_info->cid_map);
}

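/* Drain the ep free list, releasing each ep's tcp cid and memory. */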
static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
        struct qed_iwarp_ep *ep;

        while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
                spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

                ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
                                      struct qed_iwarp_ep, list_entry);

                if (!ep) {
                        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                        break;
                }
                list_del(&ep->list_entry);

                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

                if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
                        qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

                qed_iwarp_destroy_ep(p_hwfn, ep, false);
        }
}

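/* Create ep objects for the free list: all QED_IWARP_PREALLOC_CNT of them
 * at init time (with cids taken from the main pool and marked in the tcp
 * cid map), or a single replacement afterwards.
 */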
1373 static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1374 {
1375         struct qed_iwarp_ep *ep;
1376         int rc = 0;
1377         int count;
1378         u32 cid;
1379         int i;
1380
1381         count = init ? QED_IWARP_PREALLOC_CNT : 1;
1382         for (i = 0; i < count; i++) {
1383                 rc = qed_iwarp_create_ep(p_hwfn, &ep);
1384                 if (rc)
1385                         return rc;
1386
1387                 /* During initialization we allocate from the main pool,
1388                  * afterwards we allocate only from the tcp_cid.
1389                  */
1390                 if (init) {
1391                         rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1392                         if (rc)
1393                                 goto err;
1394                         qed_iwarp_set_tcp_cid(p_hwfn, cid);
1395                 } else {
1396                         /* We don't care about the return code, it's ok if
1397                          * tcp_cid remains invalid...in this case we'll
1398                          * defer allocation
1399                          */
1400                         qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1401                 }
1402
1403                 ep->tcp_cid = cid;
1404
1405                 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1406                 list_add_tail(&ep->list_entry,
1407                               &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1408                 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1409         }
1410
1411         return rc;
1412
1413 err:
1414         qed_iwarp_destroy_ep(p_hwfn, ep, false);
1415
1416         return rc;
1417 }
1418
1419 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1420 {
1421         int rc;
1422
1423         /* Allocate bitmap for tcp cids. These are used by the passive side
1424          * to ensure it can allocate a pre-acquired tcp cid during dpc
1425          * context, where dynamic allocation of ilt is not possible
1426          */
1427         rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1428                                  QED_IWARP_PREALLOC_CNT, "TCP_CID");
1429         if (rc) {
1430                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1431                            "Failed to allocate tcp cid, rc = %d\n", rc);
1432                 return rc;
1433         }
1434
1435         INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1436         spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1437
1438         rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1439         if (rc)
1440                 return rc;
1441
1442         return qed_ooo_alloc(p_hwfn);
1443 }
1444
1445 void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1446 {
1447         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1448
1449         qed_ooo_free(p_hwfn);
1450         qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1451         kfree(iwarp_info->mpa_bufs);
1452         kfree(iwarp_info->partial_fpdus);
1453         kfree(iwarp_info->mpa_intermediate_buf);
1454 }
1455
1456 int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1457 {
1458         struct qed_hwfn *p_hwfn = rdma_cxt;
1459         struct qed_iwarp_ep *ep;
1460         u8 mpa_data_size = 0;
1461         int rc;
1462
1463         ep = iparams->ep_context;
1464         if (!ep) {
1465                 DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
1466                 return -EINVAL;
1467         }
1468
1469         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1470                    iparams->qp->icid, ep->tcp_cid);
1471
1472         if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1473             (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1474                 DP_VERBOSE(p_hwfn,
1475                            QED_MSG_RDMA,
1476                            "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1477                            iparams->qp->icid,
1478                            ep->tcp_cid, iparams->ord, iparams->ird);
1479                 return -EINVAL;
1480         }
1481
1482         qed_iwarp_prealloc_ep(p_hwfn, false);
1483
1484         ep->cb_context = iparams->cb_context;
1485         ep->qp = iparams->qp;
1486         ep->qp->ep = ep;
1487
1488         if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1489                 /* Negotiate ord/ird: if the upper layer requested an ord larger
1490                  * than the ird advertised by the remote, we need to decrease our ord
1491                  */
1492                 if (iparams->ord > ep->cm_info.ird)
1493                         iparams->ord = ep->cm_info.ird;
1494
1495                 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1496                     (iparams->ird == 0))
1497                         iparams->ird = 1;
1498         }
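
        /* Illustrative example: if the remote advertised ird = 8 and the
         * upper layer requested ord = 16, ord is clamped to 8 above; a
         * zero ird combined with a zero-read RTR is bumped to 1.
         */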
1499
1500         /* Update cm_info ord/ird to be negotiated values */
1501         ep->cm_info.ord = iparams->ord;
1502         ep->cm_info.ird = iparams->ird;
1503
1504         qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1505
1506         ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1507         ep->cm_info.private_data_len = iparams->private_data_len +
1508                                        mpa_data_size;
1509
1510         memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1511                iparams->private_data, iparams->private_data_len);
1512
1513         rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1514         if (rc)
1515                 qed_iwarp_modify_qp(p_hwfn,
1516                                     iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1517
1518         return rc;
1519 }
1520
1521 int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1522 {
1523         struct qed_hwfn *p_hwfn = rdma_cxt;
1524         struct qed_iwarp_ep *ep;
1525         u8 mpa_data_size = 0;
1526
1527         ep = iparams->ep_context;
1528         if (!ep) {
1529                 DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n");
1530                 return -EINVAL;
1531         }
1532
1533         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1534
1535         ep->cb_context = iparams->cb_context;
1536         ep->qp = NULL;
1537
1538         qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1539
1540         ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1541         ep->cm_info.private_data_len = iparams->private_data_len +
1542                                        mpa_data_size;
1543
1544         memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1545                iparams->private_data, iparams->private_data_len);
1546
1547         return qed_iwarp_mpa_offload(p_hwfn, ep);
1548 }
1549
1550 static void
1551 qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1552                         struct qed_iwarp_cm_info *cm_info)
1553 {
1554         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1555                    cm_info->ip_version);
1556
1557         if (cm_info->ip_version == QED_TCP_IPV4)
1558                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1559                            "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1560                            cm_info->remote_ip, cm_info->remote_port,
1561                            cm_info->local_ip, cm_info->local_port,
1562                            cm_info->vlan);
1563         else
1564                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1565                            "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1566                            cm_info->remote_ip, cm_info->remote_port,
1567                            cm_info->local_ip, cm_info->local_port,
1568                            cm_info->vlan);
1569
1570         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1571                    "private_data_len = %x ord = %d, ird = %d\n",
1572                    cm_info->private_data_len, cm_info->ord, cm_info->ird);
1573 }
1574
1575 static int
1576 qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1577                       struct qed_iwarp_ll2_buff *buf, u8 handle)
1578 {
1579         int rc;
1580
1581         rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1582                                     (u16)buf->buff_size, buf, 1);
1583         if (rc) {
1584                 DP_NOTICE(p_hwfn,
1585                           "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1586                           rc, handle);
1587                 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1588                                   buf->data, buf->data_phys_addr);
1589                 kfree(buf);
1590         }
1591
1592         return rc;
1593 }
1594
1595 static bool
1596 qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1597 {
1598         struct qed_iwarp_ep *ep = NULL;
1599         bool found = false;
1600
1601         list_for_each_entry(ep,
1602                             &p_hwfn->p_rdma_info->iwarp.ep_list,
1603                             list_entry) {
1604                 if ((ep->cm_info.local_port == cm_info->local_port) &&
1605                     (ep->cm_info.remote_port == cm_info->remote_port) &&
1606                     (ep->cm_info.vlan == cm_info->vlan) &&
1607                     !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1608                             sizeof(cm_info->local_ip)) &&
1609                     !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1610                             sizeof(cm_info->remote_ip))) {
1611                         found = true;
1612                         break;
1613                 }
1614         }
1615
1616         if (found) {
1617                 DP_NOTICE(p_hwfn,
1618                           "SYN received on active connection - dropping\n");
1619                 qed_iwarp_print_cm_info(p_hwfn, cm_info);
1620
1621                 return true;
1622         }
1623
1624         return false;
1625 }
1626
1627 static struct qed_iwarp_listener *
1628 qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1629                        struct qed_iwarp_cm_info *cm_info)
1630 {
1631         struct qed_iwarp_listener *listener = NULL;
1632         static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1633         bool found = false;
1634
1635         list_for_each_entry(listener,
1636                             &p_hwfn->p_rdma_info->iwarp.listen_list,
1637                             list_entry) {
1638                 if (listener->port == cm_info->local_port) {
1639                         if (!memcmp(listener->ip_addr,
1640                                     ip_zero, sizeof(ip_zero))) {
1641                                 found = true;
1642                                 break;
1643                         }
1644
1645                         if (!memcmp(listener->ip_addr,
1646                                     cm_info->local_ip,
1647                                     sizeof(cm_info->local_ip)) &&
1648                             (listener->vlan == cm_info->vlan)) {
1649                                 found = true;
1650                                 break;
1651                         }
1652                 }
1653         }
1654
1655         if (found) {
1656                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1657                            listener);
1658                 return listener;
1659         }
1660
1661         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1662         return NULL;
1663 }
1664
1665 static int
1666 qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1667                        struct qed_iwarp_cm_info *cm_info,
1668                        void *buf,
1669                        u8 *remote_mac_addr,
1670                        u8 *local_mac_addr,
1671                        int *payload_len, int *tcp_start_offset)
1672 {
1673         struct vlan_ethhdr *vethh;
1674         bool vlan_valid = false;
1675         struct ipv6hdr *ip6h;
1676         struct ethhdr *ethh;
1677         struct tcphdr *tcph;
1678         struct iphdr *iph;
1679         int eth_hlen;
1680         int ip_hlen;
1681         int eth_type;
1682         int i;
1683
1684         ethh = buf;
1685         eth_type = ntohs(ethh->h_proto);
1686         if (eth_type == ETH_P_8021Q) {
1687                 vlan_valid = true;
1688                 vethh = (struct vlan_ethhdr *)ethh;
1689                 cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1690                 eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1691         }
1692
1693         eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
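        /* e.g. 14 bytes for an untagged frame, 18 with a single 802.1Q tag */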
1694
1695         if (!ether_addr_equal(ethh->h_dest,
1696                               p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1697                 DP_VERBOSE(p_hwfn,
1698                            QED_MSG_RDMA,
1699                            "Got unexpected mac %pM instead of %pM\n",
1700                            ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1701                 return -EINVAL;
1702         }
1703
1704         ether_addr_copy(remote_mac_addr, ethh->h_source);
1705         ether_addr_copy(local_mac_addr, ethh->h_dest);
1706
1707         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1708                    eth_type, ethh->h_source);
1709
1710         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1711                    eth_hlen, ethh->h_dest);
1712
1713         iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1714
1715         if (eth_type == ETH_P_IP) {
1716                 if (iph->protocol != IPPROTO_TCP) {
1717                         DP_NOTICE(p_hwfn,
1718                                   "Unexpected ip protocol on ll2 %x\n",
1719                                   iph->protocol);
1720                         return -EINVAL;
1721                 }
1722
1723                 cm_info->local_ip[0] = ntohl(iph->daddr);
1724                 cm_info->remote_ip[0] = ntohl(iph->saddr);
1725                 cm_info->ip_version = QED_TCP_IPV4;
1726
1727                 ip_hlen = (iph->ihl) * sizeof(u32);
1728                 *payload_len = ntohs(iph->tot_len) - ip_hlen;
1729         } else if (eth_type == ETH_P_IPV6) {
1730                 ip6h = (struct ipv6hdr *)iph;
1731
1732                 if (ip6h->nexthdr != IPPROTO_TCP) {
1733                         DP_NOTICE(p_hwfn,
1734                                   "Unexpected ip protocol on ll2 %x\n",
1735                                   ip6h->nexthdr);
1736                         return -EINVAL;
1737                 }
1738
1739                 for (i = 0; i < 4; i++) {
1740                         cm_info->local_ip[i] =
1741                             ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1742                         cm_info->remote_ip[i] =
1743                             ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1744                 }
1745                 cm_info->ip_version = QED_TCP_IPV6;
1746
1747                 ip_hlen = sizeof(*ip6h);
1748                 *payload_len = ntohs(ip6h->payload_len);
1749         } else {
1750                 DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1751                 return -EINVAL;
1752         }
1753
1754         tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1755
1756         if (!tcph->syn) {
1757                 DP_NOTICE(p_hwfn,
1758                           "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1759                           iph->ihl, tcph->source, tcph->dest);
1760                 return -EINVAL;
1761         }
1762
1763         cm_info->local_port = ntohs(tcph->dest);
1764         cm_info->remote_port = ntohs(tcph->source);
1765
1766         qed_iwarp_print_cm_info(p_hwfn, cm_info);
1767
1768         *tcp_start_offset = eth_hlen + ip_hlen;
1769
1770         return 0;
1771 }
1772
1773 static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1774                                                       u16 cid)
1775 {
1776         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1777         struct qed_iwarp_fpdu *partial_fpdu;
1778         u32 idx;
1779
1780         idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1781         if (idx >= iwarp_info->max_num_partial_fpdus) {
1782                 DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1783                        iwarp_info->max_num_partial_fpdus);
1784                 return NULL;
1785         }
1786
1787         partial_fpdu = &iwarp_info->partial_fpdus[idx];
1788
1789         return partial_fpdu;
1790 }
1791
1792 enum qed_iwarp_mpa_pkt_type {
1793         QED_IWARP_MPA_PKT_PACKED,
1794         QED_IWARP_MPA_PKT_PARTIAL,
1795         QED_IWARP_MPA_PKT_UNALIGNED
1796 };
1797
1798 #define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1799 #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1800 #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1801
1802 /* Pad to multiple of 4 */
1803 #define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1804 #define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)                              \
1805         (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +                      \
1806                                          QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1807                                          QED_IWARP_MPA_CRC32_DIGEST_SIZE)
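
/* Worked example (illustrative): an MPA payload of 29 bytes is carried on
 * the wire as 2 length bytes plus the payload, padded to a 4-byte boundary,
 * followed by a 4-byte CRC32 digest:
 *   ALIGN(29 + 2, 4) + 4 = 32 + 4 = 36 bytes total fpdu length
 */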
1808
1809 /* an fpdu can be fragmented over a maximum of 3 bds: header, partial mpa, unaligned */
1810 #define QED_IWARP_MAX_BDS_PER_FPDU 3
1811
1812 static const char * const pkt_type_str[] = {
1813         "QED_IWARP_MPA_PKT_PACKED",
1814         "QED_IWARP_MPA_PKT_PARTIAL",
1815         "QED_IWARP_MPA_PKT_UNALIGNED"
1816 };
1817
1818 static int
1819 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1820                       struct qed_iwarp_fpdu *fpdu,
1821                       struct qed_iwarp_ll2_buff *buf);
1822
1823 static enum qed_iwarp_mpa_pkt_type
1824 qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1825                        struct qed_iwarp_fpdu *fpdu,
1826                        u16 tcp_payload_len, u8 *mpa_data)
1827 {
1828         enum qed_iwarp_mpa_pkt_type pkt_type;
1829         u16 mpa_len;
1830
1831         if (fpdu->incomplete_bytes) {
1832                 pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1833                 goto out;
1834         }
1835
1836         /* special case of one byte remaining:
1837          * the lower byte will be read in the next packet
1838          */
1839         if (tcp_payload_len == 1) {
1840                 fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1841                 pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1842                 goto out;
1843         }
1844
1845         mpa_len = ntohs(*(__force __be16 *)mpa_data);
1846         fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1847
1848         if (fpdu->fpdu_length <= tcp_payload_len)
1849                 pkt_type = QED_IWARP_MPA_PKT_PACKED;
1850         else
1851                 pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1852
1853 out:
1854         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1855                    "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1856                    pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1857
1858         return pkt_type;
1859 }
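
/* Classification summary (illustrative recap of the logic above):
 *   fpdu->incomplete_bytes != 0        -> UNALIGNED (continuation of an fpdu)
 *   tcp_payload_len == 1               -> PARTIAL (only high length byte seen)
 *   fpdu_length <= tcp_payload_len     -> PACKED
 *   fpdu_length >  tcp_payload_len     -> PARTIAL
 */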
1860
1861 static void
1862 qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1863                     struct qed_iwarp_fpdu *fpdu,
1864                     struct unaligned_opaque_data *pkt_data,
1865                     u16 tcp_payload_size, u8 placement_offset)
1866 {
1867         u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
1868
1869         fpdu->mpa_buf = buf;
1870         fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1871         fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1872         fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset;
1873         fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset;
1874
1875         if (tcp_payload_size == 1)
1876                 fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1877         else if (tcp_payload_size < fpdu->fpdu_length)
1878                 fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1879         else
1880                 fpdu->incomplete_bytes = 0;     /* complete fpdu */
1881
1882         fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1883 }
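
/* Illustrative: with fpdu_length = 36 and tcp_payload_size = 20, the fpdu is
 * initialized with mpa_frag_len = 20 and incomplete_bytes = 16; the remaining
 * 16 bytes are expected in a following tcp segment.
 */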
1884
1885 static int
1886 qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1887                  struct qed_iwarp_fpdu *fpdu,
1888                  struct unaligned_opaque_data *pkt_data,
1889                  struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1890 {
1891         u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
1892         u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1893         int rc;
1894
1895         /* We need to copy the data from the partial packet stored in the fpdu
1896          * to the new buf; for this we also need to move the data currently
1897          * placed in the buf. The assumption is that the buffer is big enough,
1898          * since fpdu_length <= mss. We use an intermediate buffer because
1899          * we may need to copy the new data to an overlapping location
1900          */
1901         if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1902                 DP_ERR(p_hwfn,
1903                        "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1904                        buf->buff_size, fpdu->mpa_frag_len,
1905                        tcp_payload_size, fpdu->incomplete_bytes);
1906                 return -EINVAL;
1907         }
1908
1909         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1910                    "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1911                    fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1912                    (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
1913
1914         memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1915         memcpy(tmp_buf + fpdu->mpa_frag_len,
1916                (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
1917
1918         rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1919         if (rc)
1920                 return rc;
1921
1922         /* If we managed to post the buffer, copy the data to the new buffer;
1923          * otherwise this will occur in the next round...
1924          */
1925         memcpy((u8 *)(buf->data), tmp_buf,
1926                fpdu->mpa_frag_len + tcp_payload_size);
1927
1928         fpdu->mpa_buf = buf;
1929         /* fpdu->pkt_hdr remains as is */
1930         /* fpdu->mpa_frag is overridden with new buf */
1931         fpdu->mpa_frag = buf->data_phys_addr;
1932         fpdu->mpa_frag_virt = buf->data;
1933         fpdu->mpa_frag_len += tcp_payload_size;
1934
1935         fpdu->incomplete_bytes -= tcp_payload_size;
1936
1937         DP_VERBOSE(p_hwfn,
1938                    QED_MSG_RDMA,
1939                    "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1940                    buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1941                    fpdu->incomplete_bytes);
1942
1943         return 0;
1944 }
1945
1946 static void
1947 qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1948                              struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1949 {
1950         u16 mpa_len;
1951
1952         /* Update incomplete packets if needed */
1953         if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1954                 /* Missing lower byte is now available */
1955                 mpa_len = fpdu->fpdu_length | *mpa_data;
1956                 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1957                 /* one byte of hdr */
1958                 fpdu->mpa_frag_len = 1;
1959                 fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1960                 DP_VERBOSE(p_hwfn,
1961                            QED_MSG_RDMA,
1962                            "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1963                            mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1964         }
1965 }
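
/* Illustrative: if the previous segment carried only the high length byte
 * 0x01 (fpdu_length temporarily 0x0100), a low byte of 0x2c here yields
 * mpa_len = 0x012c and fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(0x012c) =
 * ALIGN(300 + 2, 4) + 4 = 308 bytes.
 */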
1966
1967 #define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1968         (GET_FIELD((_curr_pkt)->flags,     \
1969                    UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1970
1971 /* This function recycles a buffer using the ll2 drop option. The mechanism
1972  * ensures that all buffers posted to tx before this one were completed. The
1973  * buffer passed here is sent as a cookie to the tx completion function and
1974  * can then be reposted to the rx chain when done. The flow that requires
1975  * this is the one where an FPDU splits over more than 3 tcp segments: the
1976  * driver needs to re-post an rx buffer instead of the one received, but it
1977  * can't simply repost the buffer it copied from, as there is a case where
1978  * that buffer was originally a packed FPDU and is partially posted to FW.
1979  * The driver must ensure FW is done with it.
1980  */
1981 static int
1982 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1983                       struct qed_iwarp_fpdu *fpdu,
1984                       struct qed_iwarp_ll2_buff *buf)
1985 {
1986         struct qed_ll2_tx_pkt_info tx_pkt;
1987         u8 ll2_handle;
1988         int rc;
1989
1990         memset(&tx_pkt, 0, sizeof(tx_pkt));
1991         tx_pkt.num_of_bds = 1;
1992         tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1993         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1994         tx_pkt.first_frag = fpdu->pkt_hdr;
1995         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1996         buf->piggy_buf = NULL;
1997         tx_pkt.cookie = buf;
1998
1999         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2000
2001         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2002         if (rc)
2003                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2004                            "Can't drop packet rc=%d\n", rc);
2005
2006         DP_VERBOSE(p_hwfn,
2007                    QED_MSG_RDMA,
2008                    "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2009                    (unsigned long int)tx_pkt.first_frag,
2010                    tx_pkt.first_frag_len, buf, rc);
2011
2012         return rc;
2013 }
2014
2015 static int
2016 qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2017 {
2018         struct qed_ll2_tx_pkt_info tx_pkt;
2019         u8 ll2_handle;
2020         int rc;
2021
2022         memset(&tx_pkt, 0, sizeof(tx_pkt));
2023         tx_pkt.num_of_bds = 1;
2024         tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2025         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2026
2027         tx_pkt.first_frag = fpdu->pkt_hdr;
2028         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2029         tx_pkt.enable_ip_cksum = true;
2030         tx_pkt.enable_l4_cksum = true;
2031         tx_pkt.calc_ip_len = true;
2032         /* the vlan field is overloaded with enum iwarp_ll2_tx_queues */
2033         tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2034
2035         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2036
2037         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2038         if (rc)
2039                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2040                            "Can't send right edge rc=%d\n", rc);
2041         DP_VERBOSE(p_hwfn,
2042                    QED_MSG_RDMA,
2043                    "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2044                    tx_pkt.num_of_bds,
2045                    (unsigned long int)tx_pkt.first_frag,
2046                    tx_pkt.first_frag_len, rc);
2047
2048         return rc;
2049 }
2050
2051 static int
2052 qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2053                     struct qed_iwarp_fpdu *fpdu,
2054                     struct unaligned_opaque_data *curr_pkt,
2055                     struct qed_iwarp_ll2_buff *buf,
2056                     u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2057 {
2058         struct qed_ll2_tx_pkt_info tx_pkt;
2059         u16 first_mpa_offset;
2060         u8 ll2_handle;
2061         int rc;
2062
2063         memset(&tx_pkt, 0, sizeof(tx_pkt));
2064
2065         /* An unaligned packet means it's split over two tcp segments. So the
2066          * complete packet requires 3 bds, one for the header, one for the
2067          * part of the fpdu of the first tcp segment, and the last fragment
2068                  * will point to the remainder of the fpdu. A packed pdu requires only
2069          * two bds, one for the header and one for the data.
2070          */
2071         tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2072         tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2073         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2074
2075         /* Send the mpa_buf only with the last fpdu (in case of packed) */
2076         if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2077             tcp_payload_size <= fpdu->fpdu_length)
2078                 tx_pkt.cookie = fpdu->mpa_buf;
2079
2080         tx_pkt.first_frag = fpdu->pkt_hdr;
2081         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2082         tx_pkt.enable_ip_cksum = true;
2083         tx_pkt.enable_l4_cksum = true;
2084         tx_pkt.calc_ip_len = true;
2085         /* the vlan field is overloaded with enum iwarp_ll2_tx_queues */
2086         tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2087
2088         /* special case of an unaligned, not packed, packet: we need to send
2089          * both buffers as cookies to release.
2090          */
2091         if (tcp_payload_size == fpdu->incomplete_bytes)
2092                 fpdu->mpa_buf->piggy_buf = buf;
2093
2094         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2095
2096         /* Set first fragment to header */
2097         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2098         if (rc)
2099                 goto out;
2100
2101         /* Set second fragment to first part of packet */
2102         rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2103                                                fpdu->mpa_frag,
2104                                                fpdu->mpa_frag_len);
2105         if (rc)
2106                 goto out;
2107
2108         if (!fpdu->incomplete_bytes)
2109                 goto out;
2110
2111         first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
2112
2113         /* Set third fragment to second part of the packet */
2114         rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2115                                                ll2_handle,
2116                                                buf->data_phys_addr +
2117                                                first_mpa_offset,
2118                                                fpdu->incomplete_bytes);
2119 out:
2120         DP_VERBOSE(p_hwfn,
2121                    QED_MSG_RDMA,
2122                    "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2123                    tx_pkt.num_of_bds,
2124                    tx_pkt.first_frag_len,
2125                    fpdu->mpa_frag_len,
2126                    fpdu->incomplete_bytes, rc);
2127
2128         return rc;
2129 }
2130
2131 static void
2132 qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2133                        struct unaligned_opaque_data *curr_pkt,
2134                        u32 opaque_data0, u32 opaque_data1)
2135 {
2136         u64 opaque_data;
2137
2138         opaque_data = HILO_64(cpu_to_le32(opaque_data1),
2139                               cpu_to_le32(opaque_data0));
2140         *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2141
2142         le16_add_cpu(&curr_pkt->first_mpa_offset,
2143                      curr_pkt->tcp_payload_offset);
2144 }
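
/* Descriptive note: the two 32-bit opaque words from the completion are
 * combined into the unaligned_opaque_data layout, and first_mpa_offset is
 * then advanced by tcp_payload_offset so it points at the mpa data itself.
 */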
2145
2146 /* This function is called when an unaligned or incomplete MPA packet arrives;
2147  * the driver needs to align the packet, perhaps using previous data, and send
2148  * it down to FW once it is aligned.
2149  */
2150 static int
2151 qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2152                           struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2153 {
2154         struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2155         struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2156         enum qed_iwarp_mpa_pkt_type pkt_type;
2157         struct qed_iwarp_fpdu *fpdu;
2158         u16 cid, first_mpa_offset;
2159         int rc = -EINVAL;
2160         u8 *mpa_data;
2161
2162         cid = le32_to_cpu(curr_pkt->cid);
2163
2164         fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2165         if (!fpdu) { /* something corrupt with cid, post rx back */
2166                 DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2167                        cid);
2168                 goto err;
2169         }
2170
2171         do {
2172                 first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
2173                 mpa_data = ((u8 *)(buf->data) + first_mpa_offset);
2174
2175                 pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2176                                                   mpa_buf->tcp_payload_len,
2177                                                   mpa_data);
2178
2179                 switch (pkt_type) {
2180                 case QED_IWARP_MPA_PKT_PARTIAL:
2181                         qed_iwarp_init_fpdu(buf, fpdu,
2182                                             curr_pkt,
2183                                             mpa_buf->tcp_payload_len,
2184                                             mpa_buf->placement_offset);
2185
2186                         if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2187                                 mpa_buf->tcp_payload_len = 0;
2188                                 break;
2189                         }
2190
2191                         rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2192
2193                         if (rc) {
2194                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2195                                            "Can't send FPDU:reset rc=%d\n", rc);
2196                                 memset(fpdu, 0, sizeof(*fpdu));
2197                                 break;
2198                         }
2199
2200                         mpa_buf->tcp_payload_len = 0;
2201                         break;
2202                 case QED_IWARP_MPA_PKT_PACKED:
2203                         qed_iwarp_init_fpdu(buf, fpdu,
2204                                             curr_pkt,
2205                                             mpa_buf->tcp_payload_len,
2206                                             mpa_buf->placement_offset);
2207
2208                         rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2209                                                  mpa_buf->tcp_payload_len,
2210                                                  pkt_type);
2211                         if (rc) {
2212                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2213                                            "Can't send FPDU:reset rc=%d\n", rc);
2214                                 memset(fpdu, 0, sizeof(*fpdu));
2215                                 break;
2216                         }
2217
2218                         mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2219                         le16_add_cpu(&curr_pkt->first_mpa_offset,
2220                                      fpdu->fpdu_length);
2221                         break;
2222                 case QED_IWARP_MPA_PKT_UNALIGNED:
2223                         qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2224                         if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2225                                 /* special handling of fpdu split over more
2226                                  * than 2 segments
2227                                  */
2228                                 if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2229                                         rc = qed_iwarp_win_right_edge(p_hwfn,
2230                                                                       fpdu);
2231                                         /* packet will be re-processed later */
2232                                         if (rc)
2233                                                 return rc;
2234                                 }
2235
2236                                 rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2237                                                       buf,
2238                                                       mpa_buf->tcp_payload_len);
2239                                 if (rc) /* packet will be re-processed later */
2240                                         return rc;
2241
2242                                 mpa_buf->tcp_payload_len = 0;
2243                                 break;
2244                         }
2245
2246                         rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2247                                                  mpa_buf->tcp_payload_len,
2248                                                  pkt_type);
2249                         if (rc) {
2250                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2251                                            "Can't send FPDU:delay rc=%d\n", rc);
2252                                 /* don't reset fpdu -> we need it for next
2253                                  * classify
2254                                  */
2255                                 break;
2256                         }
2257
2258                         mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2259                         le16_add_cpu(&curr_pkt->first_mpa_offset,
2260                                      fpdu->incomplete_bytes);
2261
2262                         /* The framed PDU was sent - no more incomplete bytes */
2263                         fpdu->incomplete_bytes = 0;
2264                         break;
2265                 }
2266         } while (mpa_buf->tcp_payload_len && !rc);
2267
2268         return rc;
2269
2270 err:
2271         qed_iwarp_ll2_post_rx(p_hwfn,
2272                               buf,
2273                               p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2274         return rc;
2275 }
2276
2277 static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2278 {
2279         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2280         struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2281         int rc;
2282
2283         while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2284                 mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2285                                            struct qed_iwarp_ll2_mpa_buf,
2286                                            list_entry);
2287
2288                 rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2289
2290                 /* busy means break and continue processing later, don't
2291                  * remove the buf from the pending list.
2292                  */
2293                 if (rc == -EBUSY)
2294                         break;
2295
2296                 list_move_tail(&mpa_buf->list_entry,
2297                                &iwarp_info->mpa_buf_list);
2298
2299                 if (rc) {       /* different error, don't continue */
2300                         DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2301                         break;
2302                 }
2303         }
2304 }
2305
2306 static void
2307 qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2308 {
2309         struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2310         struct qed_iwarp_info *iwarp_info;
2311         struct qed_hwfn *p_hwfn = cxt;
2312         u16 first_mpa_offset;
2313
2314         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2315         mpa_buf = list_first_entry_or_null(&iwarp_info->mpa_buf_list,
2316                                            struct qed_iwarp_ll2_mpa_buf, list_entry);
2317         if (!mpa_buf) {
2318                 DP_ERR(p_hwfn, "No free mpa buf\n");
2319                 goto err;
2320         }
2321
2322         list_del(&mpa_buf->list_entry);
2323         qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2324                                data->opaque_data_0, data->opaque_data_1);
2325
2326         first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset);
2327
2328         DP_VERBOSE(p_hwfn,
2329                    QED_MSG_RDMA,
2330                    "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2331                    data->length.packet_length, first_mpa_offset,
2332                    mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2333                    mpa_buf->data.cid);
2334
2335         mpa_buf->ll2_buf = data->cookie;
2336         mpa_buf->tcp_payload_len = data->length.packet_length -
2337                                    first_mpa_offset;
2338
2339         first_mpa_offset += data->u.placement_offset;
2340         mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset);
2341         mpa_buf->placement_offset = data->u.placement_offset;
2342
2343         list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2344
2345         qed_iwarp_process_pending_pkts(p_hwfn);
2346         return;
2347 err:
2348         qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2349                               iwarp_info->ll2_mpa_handle);
2350 }
2351
2352 static void
2353 qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2354 {
2355         struct qed_iwarp_ll2_buff *buf = data->cookie;
2356         struct qed_iwarp_listener *listener;
2357         struct qed_ll2_tx_pkt_info tx_pkt;
2358         struct qed_iwarp_cm_info cm_info;
2359         struct qed_hwfn *p_hwfn = cxt;
2360         u8 remote_mac_addr[ETH_ALEN];
2361         u8 local_mac_addr[ETH_ALEN];
2362         struct qed_iwarp_ep *ep;
2363         int tcp_start_offset;
2364         u8 ll2_syn_handle;
2365         int payload_len;
2366         u32 hdr_size;
2367         int rc;
2368
2369         memset(&cm_info, 0, sizeof(cm_info));
2370         ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2371
2372         /* Check if packet was received with errors... */
2373         if (data->err_flags) {
2374                 DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2375                           data->err_flags);
2376                 goto err;
2377         }
2378
2379         if (GET_FIELD(data->parse_flags,
2380                       PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2381             GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2382                 DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2383                 goto err;
2384         }
2385
2386         rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2387                                     data->u.placement_offset, remote_mac_addr,
2388                                     local_mac_addr, &payload_len,
2389                                     &tcp_start_offset);
2390         if (rc)
2391                 goto err;
2392
2393         /* Check if there is a listener for this 4-tuple+vlan */
2394         listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2395         if (!listener) {
2396                 DP_VERBOSE(p_hwfn,
2397                            QED_MSG_RDMA,
2398                            "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2399                            data->parse_flags, data->length.packet_length);
2400
2401                 memset(&tx_pkt, 0, sizeof(tx_pkt));
2402                 tx_pkt.num_of_bds = 1;
2403                 tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2404                 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2405                 tx_pkt.first_frag = buf->data_phys_addr +
2406                                     data->u.placement_offset;
2407                 tx_pkt.first_frag_len = data->length.packet_length;
2408                 tx_pkt.cookie = buf;
2409
2410                 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2411                                                &tx_pkt, true);
2412
2413                 if (rc) {
2414                         DP_NOTICE(p_hwfn,
2415                                   "Can't post SYN back to chip rc=%d\n", rc);
2416                         goto err;
2417                 }
2418                 return;
2419         }
2420
2421         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2422         /* There may be an open ep on this connection if this is a syn
2423          * retransmit... need to make sure there isn't...
2424          */
2425         if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2426                 goto err;
2427
2428         ep = qed_iwarp_get_free_ep(p_hwfn);
2429         if (!ep)
2430                 goto err;
2431
2432         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2433         list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2434         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2435
2436         ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2437         ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2438
2439         memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2440
2441         hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
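        /* header budget (descriptive): 40 = 20B IPv4 + 20B TCP;
         * 60 = 40B IPv6 + 20B TCP
         */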
2442         ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2443         ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
2444
2445         ep->event_cb = listener->event_cb;
2446         ep->cb_context = listener->cb_context;
2447         ep->connect_mode = TCP_CONNECT_PASSIVE;
2448
2449         ep->syn = buf;
2450         ep->syn_ip_payload_length = (u16)payload_len;
2451         ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2452                            tcp_start_offset;
2453
2454         rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2455         if (rc) {
2456                 qed_iwarp_return_ep(p_hwfn, ep);
2457                 goto err;
2458         }
2459
2460         return;
2461 err:
2462         qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2463 }
2464
2465 static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2466                                      void *cookie, dma_addr_t rx_buf_addr,
2467                                      bool b_last_packet)
2468 {
2469         struct qed_iwarp_ll2_buff *buffer = cookie;
2470         struct qed_hwfn *p_hwfn = cxt;
2471
2472         dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2473                           buffer->data, buffer->data_phys_addr);
2474         kfree(buffer);
2475 }
2476
2477 static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2478                                       void *cookie, dma_addr_t first_frag_addr,
2479                                       bool b_last_fragment, bool b_last_packet)
2480 {
2481         struct qed_iwarp_ll2_buff *buffer = cookie;
2482         struct qed_iwarp_ll2_buff *piggy;
2483         struct qed_hwfn *p_hwfn = cxt;
2484
2485         if (!buffer)            /* can happen in packed mpa unaligned... */
2486                 return;
2487
2488         /* this was originally an rx packet, post it back */
2489         piggy = buffer->piggy_buf;
2490         if (piggy) {
2491                 buffer->piggy_buf = NULL;
2492                 qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2493         }
2494
2495         qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2496
2497         if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2498                 qed_iwarp_process_pending_pkts(p_hwfn);
2501 }
2502
2503 static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2504                                      void *cookie, dma_addr_t first_frag_addr,
2505                                      bool b_last_fragment, bool b_last_packet)
2506 {
2507         struct qed_iwarp_ll2_buff *buffer = cookie;
2508         struct qed_hwfn *p_hwfn = cxt;
2509
2510         if (!buffer)
2511                 return;
2512
2513         if (buffer->piggy_buf) {
2514                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2515                                   buffer->piggy_buf->buff_size,
2516                                   buffer->piggy_buf->data,
2517                                   buffer->piggy_buf->data_phys_addr);
2518
2519                 kfree(buffer->piggy_buf);
2520         }
2521
2522         dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2523                           buffer->data, buffer->data_phys_addr);
2524
2525         kfree(buffer);
2526 }
2527
2528 /* The only slowpath for iwarp ll2 is unalign flush. When this completion
2529  * is received, we need to reset the FPDU.
2530  */
2531 static void
2532 qed_iwarp_ll2_slowpath(void *cxt,
2533                        u8 connection_handle,
2534                        u32 opaque_data_0, u32 opaque_data_1)
2535 {
2536         struct unaligned_opaque_data unalign_data;
2537         struct qed_hwfn *p_hwfn = cxt;
2538         struct qed_iwarp_fpdu *fpdu;
2539         u32 cid;
2540
2541         qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2542                                opaque_data_0, opaque_data_1);
2543
2544         cid = le32_to_cpu(unalign_data.cid);
2545
2546         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid);
2547
2548         fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2549         if (fpdu)
2550                 memset(fpdu, 0, sizeof(*fpdu));
2551 }
2552
2553 static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
2554 {
2555         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2556         int rc = 0;
2557
2558         if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2559                 rc = qed_ll2_terminate_connection(p_hwfn,
2560                                                   iwarp_info->ll2_syn_handle);
2561                 if (rc)
2562                         DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2563
2564                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2565                 iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2566         }
2567
2568         if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2569                 rc = qed_ll2_terminate_connection(p_hwfn,
2570                                                   iwarp_info->ll2_ooo_handle);
2571                 if (rc)
2572                         DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2573
2574                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2575                 iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2576         }
2577
2578         if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2579                 rc = qed_ll2_terminate_connection(p_hwfn,
2580                                                   iwarp_info->ll2_mpa_handle);
2581                 if (rc)
2582                         DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2583
2584                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2585                 iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2586         }
2587
2588         qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
2589                                   p_hwfn->p_rdma_info->iwarp.mac_addr);
2590
2591         return rc;
2592 }
2593
2594 static int
2595 qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2596                             int num_rx_bufs, int buff_size, u8 ll2_handle)
2597 {
2598         struct qed_iwarp_ll2_buff *buffer;
2599         int rc = 0;
2600         int i;
2601
2602         for (i = 0; i < num_rx_bufs; i++) {
2603                 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2604                 if (!buffer) {
2605                         rc = -ENOMEM;
2606                         break;
2607                 }
2608
2609                 buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2610                                                   buff_size,
2611                                                   &buffer->data_phys_addr,
2612                                                   GFP_KERNEL);
2613                 if (!buffer->data) {
2614                         kfree(buffer);
2615                         rc = -ENOMEM;
2616                         break;
2617                 }
2618
2619                 buffer->buff_size = buff_size;
2620                 rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2621                 if (rc)
2622                         /* buffers will be deallocated by qed_ll2 */
2623                         break;
2624         }
2625         return rc;
2626 }
2627
2628 #define QED_IWARP_MAX_BUF_SIZE(mtu)                                  \
2629         ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2630                 ETH_CACHE_LINE_SIZE)
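
/* Worked example (illustrative, assuming ETH_CACHE_LINE_SIZE == 64): for
 * mtu = 1500, ALIGN(1500 + 14 + 8 + 2 + 64, 64) = ALIGN(1588, 64) = 1600
 * bytes per ll2 rx buffer.
 */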
2631
2632 static int
2633 qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2634                     struct qed_rdma_start_in_params *params,
2635                     u32 rcv_wnd_size)
2636 {
2637         struct qed_iwarp_info *iwarp_info;
2638         struct qed_ll2_acquire_data data;
2639         struct qed_ll2_cbs cbs;
2640         u32 buff_size;
2641         u16 n_ooo_bufs;
2642         int rc = 0;
2643         int i;
2644
2645         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2646         iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2647         iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2648         iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2649
2650         iwarp_info->max_mtu = params->max_mtu;
2651
2652         ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2653
2654         rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2655         if (rc)
2656                 return rc;
2657
2658         /* Start SYN connection */
2659         cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2660         cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2661         cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2662         cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2663         cbs.slowpath_cb = NULL;
2664         cbs.cookie = p_hwfn;
2665
2666         memset(&data, 0, sizeof(data));
2667         data.input.conn_type = QED_LL2_TYPE_IWARP;
2668         /* SYN will use ctx based queues */
2669         data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
2670         data.input.mtu = params->max_mtu;
2671         data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2672         data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2673         data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
2674         data.input.tx_tc = PKT_LB_TC;
2675         data.input.tx_dest = QED_LL2_TX_DEST_LB;
2676         data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2677         data.cbs = &cbs;
2678
2679         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2680         if (rc) {
2681                 DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2682                 qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2683                 return rc;
2684         }
2685
2686         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2687         if (rc) {
2688                 DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2689                 goto err;
2690         }
2691
2692         buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2693         rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2694                                          QED_IWARP_LL2_SYN_RX_SIZE,
2695                                          buff_size,
2696                                          iwarp_info->ll2_syn_handle);
2697         if (rc)
2698                 goto err;
2699
2700         /* Start OOO connection */
2701         data.input.conn_type = QED_LL2_TYPE_OOO;
2702         /* OOO/unaligned will use legacy ll2 queues (ram based) */
2703         data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
2704         data.input.mtu = params->max_mtu;
2705
2706         n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
2707                      iwarp_info->max_mtu;
2708         n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
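        /* Descriptive note: the ooo rx ring is sized proportionally to the
         * receive window in MTU-sized buffers (scaled by QED_IWARP_MAX_OOO),
         * capped at QED_IWARP_LL2_OOO_MAX_RX_SIZE.
         */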
2709
2710         data.input.rx_num_desc = n_ooo_bufs;
2711         data.input.rx_num_ooo_buffers = n_ooo_bufs;
2712
2713         data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
2714         data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2715         data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2716
2717         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2718         if (rc)
2719                 goto err;
2720
2721         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2722         if (rc)
2723                 goto err;
2724
2725         /* Start Unaligned MPA connection */
2726         cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2727         cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2728
2729         memset(&data, 0, sizeof(data));
2730         data.input.conn_type = QED_LL2_TYPE_IWARP;
2731         data.input.mtu = params->max_mtu;
2732         /* FW requires that once a packet arrives OOO, it must have at
2733          * least 2 rx buffers available on the unaligned connection
2734          * for handling the case that it is a partial fpdu.
2735          */
2736         data.input.rx_num_desc = n_ooo_bufs * 2;
2737         data.input.tx_num_desc = data.input.rx_num_desc;
2738         data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2739         data.input.tx_tc = PKT_LB_TC;
2740         data.input.tx_dest = QED_LL2_TX_DEST_LB;
2741         data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2742         data.input.secondary_queue = true;
2743         data.cbs = &cbs;
2744
2745         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2746         if (rc)
2747                 goto err;
2748
2749         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2750         if (rc)
2751                 goto err;
2752
2753         rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2754                                          data.input.rx_num_desc,
2755                                          buff_size,
2756                                          iwarp_info->ll2_mpa_handle);
2757         if (rc)
2758                 goto err;
2759
2760         iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2761                                             sizeof(*iwarp_info->partial_fpdus),
2762                                             GFP_KERNEL);
2763         if (!iwarp_info->partial_fpdus) {
2764                 rc = -ENOMEM;
2765                 goto err;
2766         }
2767
2768         iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2769
2770         iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2771         if (!iwarp_info->mpa_intermediate_buf) {
2772                 rc = -ENOMEM;
2773                 goto err;
2774         }
2775
2776         /* The mpa_bufs array holds pending RX packets received on the mpa
2777          * ll2 that have no room on the tx ring and require later
2778          * processing. Allocation of such a struct must not fail, so we
2779          * allocate enough entries to cover all rx packets.
2780          */
2781         iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2782                                        sizeof(*iwarp_info->mpa_bufs),
2783                                        GFP_KERNEL);
2784         if (!iwarp_info->mpa_bufs) {
2785                 rc = -ENOMEM;
2786                 goto err;
2787         }
2788
2789         INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2790         INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2791         for (i = 0; i < data.input.rx_num_desc; i++)
2792                 list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2793                               &iwarp_info->mpa_buf_list);
2794         return rc;
2795 err:
2796         qed_iwarp_ll2_stop(p_hwfn);
2797
2798         return rc;
2799 }
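/* qed_iwarp_ll2_start() above brings up three LL2 connections (SYN, OOO and
 * unaligned MPA), each via the same acquire/establish sequence; the SYN and
 * MPA connections then also post rx buffers. A minimal sketch of the common
 * flow, with error unwinding elided (the real function funnels all failures
 * through qed_iwarp_ll2_stop()); handle, n_desc and buff_size stand in for
 * the per-connection values:
 *
 *      data.p_connection_handle = &handle;
 *      rc = qed_ll2_acquire_connection(p_hwfn, &data);
 *      if (!rc)
 *              rc = qed_ll2_establish_connection(p_hwfn, handle);
 *      if (!rc)
 *              rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, n_desc, buff_size,
 *                                               handle);
 */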
2800
2801 static struct {
2802         u32 two_ports;
2803         u32 four_ports;
2804 } qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
2805         {QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
2806         {QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
2807 };
2808
2809 int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
2810                     struct qed_rdma_start_in_params *params)
2811 {
2812         struct qed_dev *cdev = p_hwfn->cdev;
2813         struct qed_iwarp_info *iwarp_info;
2814         enum chip_ids chip_id;
2815         u32 rcv_wnd_size;
2816
2817         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2818
2819         iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2820
2821         chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
2822         rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
2823                 qed_iwarp_rcv_wnd_size[chip_id].four_ports :
2824                 qed_iwarp_rcv_wnd_size[chip_id].two_ports;
2825
2826         /* A scale of 0 corresponds to a window of QED_IWARP_RCV_WND_SIZE_MIN */
2827         iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2828             ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2829         iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
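        /* A worked example of the scaling above, assuming the BB two-port
         * default of 200 KiB (204800 bytes) for rcv_wnd_size:
         *
         *      rcv_wnd_scale = ilog2(204800) - ilog2(0xffff) = 17 - 15 = 2
         *      rcv_wnd_size  = 204800 >> 2 = 51200
         *
         * i.e. TCP advertises a 51200-byte window with a shift count of 2,
         * for an effective receive window of 204800 bytes.
         */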
2830         iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2831         iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2832
2833         iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2834
2835         iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2836                                 MPA_RTR_TYPE_ZERO_WRITE |
2837                                 MPA_RTR_TYPE_ZERO_READ;
2838
2839         spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2840         INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2841         INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2842
2843         qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2844                                   qed_iwarp_async_event);
2845         qed_ooo_setup(p_hwfn);
2846
2847         return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
2848 }
2849
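/* Stop ordering matters: release the pre-allocated endpoints first, then
 * wait for firmware to clean all iWARP CIDs, and only then tear down the
 * LL2 connections, so the LL2 queues are not removed while connections
 * that use them are still winding down.
 */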
2850 int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
2851 {
2852         int rc;
2853
2854         qed_iwarp_free_prealloc_ep(p_hwfn);
2855         rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2856         if (rc)
2857                 return rc;
2858
2859         return qed_iwarp_ll2_stop(p_hwfn);
2860 }
2861
2862 static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2863                                   struct qed_iwarp_ep *ep,
2864                                   u8 fw_return_code)
2865 {
2866         struct qed_iwarp_cm_event_params params;
2867
2868         qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2869
2870         params.event = QED_IWARP_EVENT_CLOSE;
2871         params.ep_context = ep;
2872         params.cm_info = &ep->cm_info;
2873         params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2874                          0 : -ECONNRESET;
2875
2876         /* paired with READ_ONCE in destroy_qp */
2877         smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2878
2879         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2880         list_del(&ep->list_entry);
2881         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2882
2883         ep->event_cb(ep->cb_context, &params);
2884 }
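/* A minimal sketch of the release/READ_ONCE pairing noted in the function
 * above; the reader side lives in the QP destroy path, which is assumed
 * here rather than shown:
 *
 *      this path                               destroy path (assumed)
 *      ---------                               ----------------------
 *      ...prepare ep/params...                 state = READ_ONCE(ep->state);
 *      smp_store_release(&ep->state,           if (state == QED_IWARP_EP_CLOSED)
 *                        QED_IWARP_EP_CLOSED);         ...prior writes visible...
 */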
2885
2886 static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2887                                          struct qed_iwarp_ep *ep,
2888                                          int fw_ret_code)
2889 {
2890         struct qed_iwarp_cm_event_params params;
2891         bool event_cb = false;
2892
2893         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2894                    ep->cid, fw_ret_code);
2895
2896         switch (fw_ret_code) {
2897         case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2898                 params.status = 0;
2899                 params.event = QED_IWARP_EVENT_DISCONNECT;
2900                 event_cb = true;
2901                 break;
2902         case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2903                 params.status = -ECONNRESET;
2904                 params.event = QED_IWARP_EVENT_DISCONNECT;
2905                 event_cb = true;
2906                 break;
2907         case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2908                 params.event = QED_IWARP_EVENT_RQ_EMPTY;
2909                 event_cb = true;
2910                 break;
2911         case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2912                 params.event = QED_IWARP_EVENT_IRQ_FULL;
2913                 event_cb = true;
2914                 break;
2915         case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2916                 params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2917                 event_cb = true;
2918                 break;
2919         case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2920                 params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2921                 event_cb = true;
2922                 break;
2923         case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2924                 params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2925                 event_cb = true;
2926                 break;
2927         case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2928                 params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2929                 event_cb = true;
2930                 break;
2931         case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2932                 params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2933                 event_cb = true;
2934                 break;
2935         case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2936                 params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2937                 event_cb = true;
2938                 break;
2939         case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2940                 params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2941                 event_cb = true;
2942                 break;
2943         default:
2944                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2945                            "Unhandled exception received...fw_ret_code=%d\n",
2946                            fw_ret_code);
2947                 break;
2948         }
2949
2950         if (event_cb) {
2951                 params.ep_context = ep;
2952                 params.cm_info = &ep->cm_info;
2953                 ep->event_cb(ep->cb_context, &params);
2954         }
2955 }
2956
2957 static void
2958 qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2959                                    struct qed_iwarp_ep *ep, u8 fw_return_code)
2960 {
2961         struct qed_iwarp_cm_event_params params;
2962
2963         memset(&params, 0, sizeof(params));
2964         params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2965         params.ep_context = ep;
2966         params.cm_info = &ep->cm_info;
2967         /* paired with READ_ONCE in destroy_qp */
2968         smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2969
2970         switch (fw_return_code) {
2971         case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2972                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2973                            "%s(0x%x) TCP connect got invalid packet\n",
2974                            QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2975                 params.status = -ECONNRESET;
2976                 break;
2977         case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2978                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2979                            "%s(0x%x) TCP Connection Reset\n",
2980                            QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2981                 params.status = -ECONNRESET;
2982                 break;
2983         case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2984                 DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2985                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2986                 params.status = -EBUSY;
2987                 break;
2988         case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2989                 DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
2990                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2991                 params.status = -ECONNREFUSED;
2992                 break;
2993         case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2994                 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2995                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2996                 params.status = -ECONNRESET;
2997                 break;
2998         default:
2999                 DP_ERR(p_hwfn,
3000                        "%s(0x%x) Unexpected return code tcp connect: %d\n",
3001                        QED_IWARP_CONNECT_MODE_STRING(ep),
3002                        ep->tcp_cid, fw_return_code);
3003                 params.status = -ECONNRESET;
3004                 break;
3005         }
3006
3007         if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3008                 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
3009                 qed_iwarp_return_ep(p_hwfn, ep);
3010         } else {
3011                 ep->event_cb(ep->cb_context, &params);
3012                 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3013                 list_del(&ep->list_entry);
3014                 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3015         }
3016 }
3017
3018 static void
3019 qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
3020                            struct qed_iwarp_ep *ep, u8 fw_return_code)
3021 {
3022         u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
3023
3024         if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3025                 /* Done with the SYN packet, post back to ll2 rx */
3026                 qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
3027
3028                 ep->syn = NULL;
3029
3030                 /* If the connect failed, the upper layer was never notified */
3031                 if (fw_return_code == RDMA_RETURN_OK)
3032                         qed_iwarp_mpa_received(p_hwfn, ep);
3033                 else
3034                         qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3035                                                            fw_return_code);
3036         } else {
3037                 if (fw_return_code == RDMA_RETURN_OK)
3038                         qed_iwarp_mpa_offload(p_hwfn, ep);
3039                 else
3040                         qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3041                                                            fw_return_code);
3042         }
3043 }
3044
3045 static inline bool
3046 qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
3047 {
3048         if (!ep || (ep->sig != QED_EP_SIG)) {
3049                 DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3050                 return false;
3051         }
3052
3053         return true;
3054 }
3055
3056 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
3057                                  __le16 echo, union event_ring_data *data,
3058                                  u8 fw_return_code)
3059 {
3060         struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
3061         struct regpair *fw_handle = &data->rdma_data.async_handle;
3062         struct qed_iwarp_ep *ep = NULL;
3063         u16 srq_offset;
3064         u16 srq_id;
3065         u16 cid;
3066
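        /* The firmware echoes back the 64-bit cookie from the offload-time
         * async handle, split across the hi/lo halves of a regpair; HILO_64()
         * reassembles it into the ep pointer (qed_iwarp_check_ep_ok() below
         * validates its signature). For the CID-cleaned and SRQ events the
         * low 32 bits carry an id rather than a pointer.
         */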
3067         ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3068                                                        fw_handle->lo);
3069
3070         switch (fw_event_code) {
3071         case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3072                 /* Async completion after TCP 3-way handshake */
3073                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3074                         return -EINVAL;
3075                 DP_VERBOSE(p_hwfn,
3076                            QED_MSG_RDMA,
3077                            "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3078                            ep->tcp_cid, fw_return_code);
3079                 qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3080                 break;
3081         case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3082                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3083                         return -EINVAL;
3084                 DP_VERBOSE(p_hwfn,
3085                            QED_MSG_RDMA,
3086                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3087                            ep->cid, fw_return_code);
3088                 qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3089                 break;
3090         case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3091                 /* Async completion for Close Connection ramrod */
3092                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3093                         return -EINVAL;
3094                 DP_VERBOSE(p_hwfn,
3095                            QED_MSG_RDMA,
3096                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3097                            ep->cid, fw_return_code);
3098                 qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3099                 break;
3100         case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3101                 /* Async event for active side only */
3102                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3103                         return -EINVAL;
3104                 DP_VERBOSE(p_hwfn,
3105                            QED_MSG_RDMA,
3106                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3107                            ep->cid, fw_return_code);
3108                 qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3109                 break;
3110         case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3111                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3112                         return -EINVAL;
3113                 DP_VERBOSE(p_hwfn,
3114                            QED_MSG_RDMA,
3115                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3116                            ep->cid, fw_return_code);
3117                 qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3118                 break;
3119         case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3120                 cid = (u16)le32_to_cpu(fw_handle->lo);
3121                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3122                            "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3123                 qed_iwarp_cid_cleaned(p_hwfn, cid);
3124
3125                 break;
3126         case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
3127                 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
3128                 srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3129                 /* FW assigns a value that fits in a u16 */
3130                 srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3131                 events.affiliated_event(events.context,
3132                                         QED_IWARP_EVENT_SRQ_EMPTY,
3133                                         &srq_id);
3134                 break;
3135         case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
3136                 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
3137                 srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3138                 /* FW assigns a value that fits in a u16 */
3139                 srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3140                 events.affiliated_event(events.context,
3141                                         QED_IWARP_EVENT_SRQ_LIMIT,
3142                                         &srq_id);
3143                 break;
3144         case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3145                 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3146
3147                 p_hwfn->p_rdma_info->events.affiliated_event(
3148                         p_hwfn->p_rdma_info->events.context,
3149                         QED_IWARP_EVENT_CQ_OVERFLOW,
3150                         (void *)fw_handle);
3151                 break;
3152         default:
3153                 DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3154                        fw_event_code);
3155                 return -EINVAL;
3156         }
3157         return 0;
3158 }
3159
3160 int
3161 qed_iwarp_create_listen(void *rdma_cxt,
3162                         struct qed_iwarp_listen_in *iparams,
3163                         struct qed_iwarp_listen_out *oparams)
3164 {
3165         struct qed_hwfn *p_hwfn = rdma_cxt;
3166         struct qed_iwarp_listener *listener;
3167
3168         listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3169         if (!listener)
3170                 return -ENOMEM;
3171
3172         listener->ip_version = iparams->ip_version;
3173         memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3174         listener->port = iparams->port;
3175         listener->vlan = iparams->vlan;
3176
3177         listener->event_cb = iparams->event_cb;
3178         listener->cb_context = iparams->cb_context;
3179         listener->max_backlog = iparams->max_backlog;
3180         oparams->handle = listener;
3181
3182         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3183         list_add_tail(&listener->list_entry,
3184                       &p_hwfn->p_rdma_info->iwarp.listen_list);
3185         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3186
3187         DP_VERBOSE(p_hwfn,
3188                    QED_MSG_RDMA,
3189                    "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3190                    listener->event_cb,
3191                    listener,
3192                    listener->ip_addr[0],
3193                    listener->ip_addr[1],
3194                    listener->ip_addr[2],
3195                    listener->ip_addr[3], listener->port, listener->vlan);
3196
3197         return 0;
3198 }
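/* A minimal caller-side sketch for the listener API above. my_event_cb and
 * my_ctx are hypothetical names, and the address fields are left out since
 * their encoding is not shown here; this is an illustration, not a verbatim
 * consumer:
 *
 *      struct qed_iwarp_listen_in iparams = {};
 *      struct qed_iwarp_listen_out oparams;
 *
 *      iparams.port = 4321;
 *      iparams.max_backlog = 8;
 *      iparams.event_cb = my_event_cb;
 *      iparams.cb_context = my_ctx;
 *      if (!qed_iwarp_create_listen(rdma_cxt, &iparams, &oparams)) {
 *              ...
 *              qed_iwarp_destroy_listen(rdma_cxt, oparams.handle);
 *      }
 */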
3199
3200 int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3201 {
3202         struct qed_iwarp_listener *listener = handle;
3203         struct qed_hwfn *p_hwfn = rdma_cxt;
3204
3205         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3206
3207         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3208         list_del(&listener->list_entry);
3209         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3210
3211         kfree(listener);
3212
3213         return 0;
3214 }
3215
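/* qed_iwarp_send_rtr() - post the MPA_OFFLOAD_SEND_RTR ramrod for the
 * endpoint's QP. The slowpath entry is initialized on the QP's icid in
 * callback completion mode (QED_SPQ_MODE_CB) and posted to the SPQ.
 */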
3216 int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3217 {
3218         struct qed_hwfn *p_hwfn = rdma_cxt;
3219         struct qed_sp_init_data init_data;
3220         struct qed_spq_entry *p_ent;
3221         struct qed_iwarp_ep *ep;
3222         struct qed_rdma_qp *qp;
3223         int rc;
3224
3225         ep = iparams->ep_context;
3226         if (!ep) {
3227                 DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
3228                 return -EINVAL;
3229         }
3230
3231         qp = ep->qp;
3232
3233         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3234                    qp->icid, ep->tcp_cid);
3235
3236         memset(&init_data, 0, sizeof(init_data));
3237         init_data.cid = qp->icid;
3238         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3239         init_data.comp_mode = QED_SPQ_MODE_CB;
3240
3241         rc = qed_sp_init_request(p_hwfn, &p_ent,
3242                                  IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3243                                  PROTOCOLID_IWARP, &init_data);
3244
3245         if (rc)
3246                 return rc;
3247
3248         rc = qed_spq_post(p_hwfn, p_ent, NULL);
3249
3250         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3251
3252         return rc;
3253 }
3254
3255 void
3256 qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3257                    struct qed_rdma_query_qp_out_params *out_params)
3258 {
3259         out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3260 }