/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by: Dimitris Michailidis (dm@chelsio.com)
 *             Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"
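
/*
 * Keep dbg_level defined before pulling in libcxgbi.h: the log_debug()
 * macro there tests dbg_level against the CXGBI_DBG_* message classes.
 */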
static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb3i"
#define DRV_MODULE_DESC         "Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION      "2.0.1-ko"
#define DRV_MODULE_RELDATE      "Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3i_sport_base = 20000;
module_param(cxgb3i_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");
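
/*
 * Source ports for active opens are drawn from a per-adapter port map of
 * cxgb3i_max_connect entries starting at cxgb3i_sport_base; see the
 * cxgbi_device_portmap_create() call in cxgb3i_ofld_init() below.
 */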

static void cxgb3i_dev_open(struct t3cdev *);
static void cxgb3i_dev_close(struct t3cdev *);
static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);

static struct cxgb3_client t3_client = {
	.name = DRV_MODULE_NAME,
	.handlers = cxgb3i_cpl_handlers,
	.add = cxgb3i_dev_open,
	.remove = cxgb3i_dev_close,
	.event_handler = cxgb3i_dev_event_handler,
};

static struct scsi_host_template cxgb3i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB3I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.dma_boundary	= PAGE_SIZE - 1,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	/* owner and name should be set already */
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb3i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message-passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
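
/*
 * An active open starts with a CPL_ACT_OPEN_REQ carrying an atid; the
 * hardware answers with either CPL_ACT_ESTABLISH (connection up, the atid is
 * traded for a real tid) or CPL_ACT_OPEN_RPL (open failed), both handled
 * further below.
 */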
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      const struct l2t_entry *e)
{
	unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

	skb->priority = CPL_PRIORITY_SETUP;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;

	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
			V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
			V_RCV_BUFSIZ(csk->rcv_win >> 10));

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
		csk, csk->state, csk->flags, csk->atid,
		&req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->mss_idx, e->idx, e->smt_idx);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
 * the write queue (i.e., after any unsent TX data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->cpl_close = NULL;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(csk->write_seq);

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"t3dev 0x%p, tid %u, skb 0x%p.\n",
		tdev, GET_TID(req), skb);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;
	struct cpl_abort_req *req;

	if (unlikely(csk->state == CTP_ABORTING || !skb))
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* Purge the send queue so we don't send anything after an abort. */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the ABORT_REQ received.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;
	u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits, dack);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
	req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
				V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return credits;
}

/*
 * CPL connection tx data: host ->
 *
 * Send an iSCSI PDU via a TX_DATA CPL message. Returns the number of
 * credits sent.
 * Each TX_DATA consumes work request credits (WRs), so we need to keep track
 * of how many we've used so far and how many are pending (i.e., not yet acked
 * by T3).
 */
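
/*
 * skb_wrs[i] is the number of work requests needed to send an skb carrying
 * i fragments; wrlen caches the per-WR payload limit in bytes. Both are
 * filled in by init_wr_tab() from the firmware-reported WR length.
 */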
static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

static void init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])		/* already initialized */
		return;
	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}
	wrlen = wr_len * 8;
}
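
/*
 * Prepend a TX_DATA work request header to an skb about to be handed to the
 * hardware. The first WR on a connection additionally carries F_TX_INIT and
 * the send-buffer size; the CTPF_TX_DATA_SENT flag tracks that this has been
 * done.
 */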
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;
	struct l2t_entry *l2t = csk->l2t;

	skb_reset_transport_header(skb);
	req = __skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(csk->tid));
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
			   V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
	req->sndseq = htonl(csk->snd_nxt);
	req->param = htonl(V_TX_PORT(l2t->smt_idx));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(csk->rss_qid));
		/* sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}
}

/**
 * push_tx_frames -- start transmit
 * @csk: the offloaded connection
 * @req_completion: request wr_ack or not
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */
static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (csk->wr_cred < wrs_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
				csk, skb->len, skb->data_len, frags,
				wrs_needed, csk->wr_cred);
			break;
		}

		__skb_unlink(skb, &csk->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		csk->wr_cred -= wrs_needed;
		csk->wr_una_cred += wrs_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
			"left %u, unack %u.\n",
			csk, skb->len, skb->data_len, frags, skb->csum,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if ((req_completion &&
			    csk->wr_una_cred == wrs_needed) ||
			    csk->wr_una_cred >= csk->wr_max_cred / 2) {
				req_completion = 1;
				csk->wr_una_cred = 0;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, len, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, tid 0x%x, send skb 0x%p.\n",
			csk, csk->tid, skb);
		set_arp_failure_handler(skb, arp_failure_skb_discard);
		l2t_send(csk->cdev->lldev, skb, csk->l2t);
	}
	return total_size;
}

/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message. Runs with
 * the connection lock held.
 */
static inline void free_atid(struct cxgbi_sock *csk)
{
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb3_free_atid(csk->cdev->lldev, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		atid, tid, csk, csk->state, csk->flags, rcv_isn);

	cxgbi_sock_get(csk);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
	csk->tid = tid;
	cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

	free_atid(csk);

	csk->rss_qid = G_QNUM(ntohs(skb->csum));

	spin_lock_bh(&csk->lock);
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
		csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 1);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
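
/*
 * A CPL_ERR_CONN_EXIST failure is retried rather than failed outright:
 * do_act_open_rpl() below arms this timer, which re-sends the
 * CPL_ACT_OPEN_REQ roughly half a second later.
 */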
static void act_open_retry_timer(struct timer_list *t)
{
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct sk_buff *skb;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		set_arp_failure_handler(skb, act_open_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags, csk->atid, rpl->status,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(tdev, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != act_open_retry_timer) {
		csk->retry_timer.function = act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
				act_open_rpl_status_to_errno(rpl->status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_rcv_peer_close(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process CLOSE_CONN_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);
	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late. These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;
	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS, the latter frees the connection
	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one and so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (csk)
		cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs; the payload could be DDP'ed. If not, the payload
 * follows the bhs.
 */
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, hdr_len);
		goto abort_conn;
	}
	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);
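
	/* the skb carries a cpl_rx_data_ddp_norss trailer at its tail with the
	 * DDP status; copy it out before parsing the rest of the message.
	 */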
	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0) {
		pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, sizeof(ddp_cpl), err);
		goto abort_conn;
	}

	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0) {
			pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
				csk->cdev->ports[csk->port_id]->name,
				csk->tid, sizeof(data_cpl), skb->len, err);
			goto abort_conn;
		}
		data_len = ntohs(data_cpl.len);
		log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&csk->receive_queue, skb);
	cxgbi_conn_pdu_ready(csk);

	spin_unlock_bh(&csk->lock);
	return 0;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, cr %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

	cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
	__kfree_skb(skb);
	return 0;
}

/*
 * For each connection, pre-allocate the skbs needed for close/abort requests,
 * so that we can service the request right away.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
				  GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;
	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
					GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpl_skbs;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
					GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpl_skbs;

	return 0;

free_cpl_skbs:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

/**
 * release_offload_resources - release offload resources
 * @csk: the offloaded iscsi tcp connection.
 *
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void l2t_put(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	if (csk->l2t) {
		l2t_release(t3dev, csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

static void update_address(struct cxgbi_hba *chba)
{
	if (chba->ipv4addr) {
		if (chba->vdev &&
		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
			pr_info("%s set %pI4.\n",
				chba->vdev->name, &chba->ipv4addr);
		} else if (chba->ipv4addr !=
				cxgb3i_get_private_ipv4addr(chba->ndev)) {
			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
			pr_info("%s set %pI4.\n",
				chba->ndev->name, &chba->ipv4addr);
		}
	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
		if (chba->vdev)
			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
	}
}
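
/*
 * Start an active open: resolve the L2T entry and allocate an atid, set up
 * the send/receive windows and WR credits, then hand the CPL_ACT_OPEN_REQ
 * built by send_act_open_req() to the hardware.
 */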
static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
	struct sk_buff *skb = NULL;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	update_address(chba);
	if (chba->ipv4addr)
		csk->saddr.sin_addr.s_addr = chba->ipv4addr;

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst, ndev,
			      &csk->daddr.sin_addr.s_addr);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	cxgbi_sock_get(csk);

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (csk->atid < 0) {
		pr_err("NO atid available.\n");
		return -EINVAL;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb) {
		cxgb3_free_atid(t3dev, csk->atid);
		cxgbi_sock_put(csk);
		return -ENOMEM;
	}
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);
	csk->snd_win = cxgb3i_snd_win;
	csk->rcv_win = cxgb3i_rcv_win;

	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;
}
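
/*
 * CPL opcode -> handler dispatch table; wired into the cxgb3 core through
 * t3_client.handlers above.
 */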
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};

/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev: cxgbi adapter
 */
static int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct adap_ports port;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	int rc;

	if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
	    t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
	    t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
		return -EINVAL;
	}

	if (cxgb3i_max_connect > CXGBI_MAX_CONN)
		cxgb3i_max_connect = CXGBI_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
					 cxgb3i_max_connect);
	if (rc < 0)
		return rc;

	init_wr_tab(wr_len);
	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	memset(req, 0, sizeof(*req));

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1));
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return ((struct t3cdev *)cdev->lldev)->ulp_iscsi;
}
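
/*
 * Write out the pagepods for a task: each loop iteration below builds one
 * ULP_MEM_WRITE work request that programs a single IPPOD_SIZE pagepod
 * covering up to PPOD_PAGES_MAX pages of the scatterlist.
 */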
static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	struct scatterlist *sg = ttinfo->sgl;
	struct cxgbi_pagepod *ppod;
	struct ulp_mem_io *req;
	unsigned int sg_off;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb)
			return -ENOMEM;
		ulp_mem_io_set_hdr(skb, pm_addr);
		req = (struct ulp_mem_io *)skb->head;
		ppod = (struct cxgbi_pagepod *)(req + 1);
		sg_off = i * PPOD_PAGES_MAX;
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg,
				       &sg_off);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
	return 0;
}

static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
			  struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int npods = ttinfo->npods;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, clear idx %u, npods %u.\n",
		  cdev, idx, npods);

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb) {
			pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n",
			       cdev, idx, i, npods);
			continue;
		}
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
				       GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		  "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * ddp_setup_conn_digest - setup conn. digest setting
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 *
 * set up the iscsi digest settings for a connection identified by tid
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
				       GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	log_debug(1 << CXGBI_DBG_DDP,
		  "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 *
 * initialize the ddp pagepod manager for a given adapter
 */
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax, tagmask = 0;
	struct ulp_iscsi_info uinfo;
	int i, err;

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		pr_err("%s, failed to get iscsi param %d.\n",
		       ndev->name, err);
		return err;
	}
	if (uinfo.llimit >= uinfo.ulimit) {
		pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n",
			ndev->name, uinfo.llimit, uinfo.ulimit);
		return -EACCES;
	}

	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	tagmask = cxgbi_tagmask_set(ppmax);

	pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n",
		ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask,
		tagmask);

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
	cxgbi_tagmask_check(tagmask, &tformat);

	err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat,
				  (uinfo.ulimit - uinfo.llimit + 1),
				  uinfo.llimit, uinfo.llimit, 0, 0, 0);
	if (err)
		return err;

	if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
		uinfo.tagmask = tagmask;
		uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

		err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
		if (err < 0) {
			pr_err("T3 %s fail to set iscsi param %d.\n",
			       ndev->name, err);
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		}
		err = 0;
	}

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->csk_ddp_clear_map = ddp_clear_map;
	cdev->cdev2ppm = cdev2ppm;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
	return 0;
}

static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
		pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
		return;
	}

	cxgbi_device_unregister(cdev);
}

/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
	struct adapter *adapter = tdev2adap(t3dev);
	int i, err;

	if (cdev) {
		pr_info("0x%p, updating.\n", cdev);
		return;
	}

	cdev = cxgbi_device_register(0, adapter->params.nports);
	if (!cdev) {
		pr_warn("device 0x%p register failed.\n", t3dev);
		return;
	}

	cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
	cdev->lldev = t3dev;
	cdev->pdev = adapter->pdev;
	cdev->ports = adapter->port;
	cdev->nports = adapter->params.nports;
	cdev->mtus = adapter->params.mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
	cdev->itp = &cxgb3i_iscsi_transport;

	err = cxgb3i_ddp_init(cdev);
	if (err) {
		pr_info("0x%p ddp init failed %d\n", cdev, err);
		goto err_out;
	}

	err = cxgb3i_ofld_init(cdev);
	if (err) {
		pr_info("0x%p offload init failed\n", cdev);
		goto err_out;
	}

	err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
			     &cxgb3i_host_template, cxgb3i_stt);
	if (err)
		goto err_out;

	for (i = 0; i < cdev->nports; i++)
		cdev->hbas[i]->ipv4addr =
			cxgb3i_get_private_ipv4addr(cdev->ports[i]);

	pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
		cdev, cdev->flags, t3dev, err);
	return;

err_out:
	cxgbi_device_unregister(cdev);
}
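
/*
 * Offload status events bracket an adapter reset: OFFLOAD_STATUS_DOWN marks
 * the cdev so the close path skips unregistering it, and OFFLOAD_STATUS_UP
 * clears the mark again.
 */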
static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	log_debug(1 << CXGBI_DBG_TOE,
		"0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
		t3dev, cdev, event, port);
	if (!cdev)
		return;

	switch (event) {
	case OFFLOAD_STATUS_DOWN:
		cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
		break;
	case OFFLOAD_STATUS_UP:
		cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
		break;
	}
}

/**
 * cxgb3i_init_module - module init entry point
 *
 * initialize any driver wide global data structures and register itself
 *	with the cxgb3 module
 */
static int __init cxgb3i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
	if (rc < 0)
		return rc;

	cxgb3_register_client(&t3_client);
	return 0;
}

/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * go through the driver's hba list and, for each hba, release any resources
 *	held; then unregister the iscsi transport and the cxgb3 client
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3_client);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);