// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * iSCSI Initiator over TCP/IP Data-Path
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 - 2006 Mike Christie
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 * maintained by open-iscsi@googlegroups.com
 *
 * See the file COPYING included with this distribution for more details.
 */
#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
#include <linux/backing-dev.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <trace/events/iscsi.h>
#include <trace/events/sock.h>

#include "iscsi_tcp.h"
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");

static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
static const struct scsi_host_template iscsi_sw_tcp_sht;
static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

static bool iscsi_recv_from_iscsi_q;
module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");
static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");
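
/*
 * Example (not from the original source): the debug flag can be set at
 * module load time or, because the parameter is root-writable, flipped at
 * runtime via sysfs:
 *
 *	modprobe iscsi_tcp debug_iscsi_tcp=1
 *	echo 1 > /sys/module/iscsi_tcp/parameters/debug_iscsi_tcp
 */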
#define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...)		\
	do {							\
		if (iscsi_sw_tcp_dbg)				\
			iscsi_conn_printk(KERN_INFO, _conn,	\
					  "%s " dbg_fmt,	\
					  __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp,		\
				&(_conn)->cls_conn->dev,	\
				"%s " dbg_fmt, __func__, ##arg);\
	} while (0);
/**
 * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 */
static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct iscsi_conn *conn = rd_desc->arg.data;
	unsigned int consumed, total_consumed = 0;
	int status;

	ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset);

	do {
		status = 0;
		consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
		offset += consumed;
		total_consumed += consumed;
	} while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);

	ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n",
			 skb->len - offset, status);
	return total_consumed;
}
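
/*
 * Note: iscsi_sw_tcp_recv() is the actor passed to tcp_read_sock() in
 * iscsi_sw_tcp_recv_data() below; its return value is the number of bytes
 * consumed from the skb, which tells the TCP layer how far the receive
 * queue has been advanced.
 */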
/**
 * iscsi_sw_sk_state_check - check socket state
 * @sk: socket
 *
 * If the socket is in CLOSE or CLOSE_WAIT we should
 * not close the connection if there is still some
 * data pending.
 *
 * Must be called with sk_callback_lock.
 */
static inline int iscsi_sw_sk_state_check(struct sock *sk)
{
	struct iscsi_conn *conn = sk->sk_user_data;

	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
	    (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
	    !atomic_read(&sk->sk_rmem_alloc)) {
		ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
		iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
		return -ECONNRESET;
	}
	return 0;
}
static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;
	read_descriptor_t rd_desc;

	/*
	 * Use rd_desc to pass 'conn' to iscsi_sw_tcp_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_sw_tcp_recv
	 * handles pdus that cross buffers or pdus that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;

	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);

	/* If we had to (atomically) map a highmem page,
	 * unmap it now. */
	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);

	iscsi_sw_sk_state_check(sk);
}
static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
{
	struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
					       recvwork);
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	lock_sock(sk);
	iscsi_sw_tcp_recv_data(conn);
	release_sock(sk);
}
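
/*
 * Two receive paths exist: with recv_from_iscsi_q set, the data_ready
 * callback below only queues conn->recvwork and the socket is drained in
 * workqueue context by iscsi_sw_tcp_recv_data_work(); otherwise the skbs
 * are consumed directly in softirq context.
 */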
static void iscsi_sw_tcp_data_ready(struct sock *sk)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_conn *conn;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	if (tcp_sw_conn->queue_recv)
		iscsi_conn_queue_recv(conn);
	else
		iscsi_sw_tcp_recv_data(conn);
	read_unlock_bh(&sk->sk_callback_lock);
}
static void iscsi_sw_tcp_state_change(struct sock *sk)
{
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_conn *conn;
	void (*old_state_change)(struct sock *);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}

	iscsi_sw_sk_state_check(sk);

	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	old_state_change = tcp_sw_conn->old_state_change;

	read_unlock_bh(&sk->sk_callback_lock);

	old_state_change(sk);
}
/**
 * iscsi_sw_tcp_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 */
static void iscsi_sw_tcp_write_space(struct sock *sk)
{
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	void (*old_write_space)(struct sock *);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}

	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	old_write_space = tcp_sw_conn->old_write_space;
	read_unlock_bh(&sk->sk_callback_lock);

	old_write_space(sk);

	ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
	iscsi_conn_queue_xmit(conn);
}
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	/* assign new callbacks */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	tcp_sw_conn->old_data_ready = sk->sk_data_ready;
	tcp_sw_conn->old_state_change = sk->sk_state_change;
	tcp_sw_conn->old_write_space = sk->sk_write_space;
	sk->sk_data_ready = iscsi_sw_tcp_data_ready;
	sk->sk_state_change = iscsi_sw_tcp_state_change;
	sk->sk_write_space = iscsi_sw_tcp_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}
static void
iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk = tcp_sw_conn->sock->sk;

	/* restore socket callbacks, see also: iscsi_sw_tcp_conn_set_callbacks() */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = tcp_sw_conn->old_data_ready;
	sk->sk_state_change = tcp_sw_conn->old_state_change;
	sk->sk_write_space = tcp_sw_conn->old_write_space;
	sk->sk_no_check_tx = 0;
	write_unlock_bh(&sk->sk_callback_lock);
}
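
/*
 * The two helpers above must stay symmetric: set_callbacks() stashes the
 * original socket callbacks under sk_callback_lock, and restore_callbacks()
 * puts them back before the socket is released, so the network stack never
 * calls into a torn-down connection.
 */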
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (tcp_sw_conn->queue_recv)
			flags |= MSG_DONTWAIT;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}
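
/*
 * iscsi_sw_tcp_xmit_segment() returns the number of bytes handed to the
 * network layer for this call, 0 if there was nothing left to send, or a
 * negative errno (e.g. -EAGAIN when the socket buffer is full);
 * iscsi_sw_tcp_xmit() below loops on that value.
 */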
/**
 * iscsi_sw_tcp_xmit - TCP transmit
 * @conn: iscsi connection
 */
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}
/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 * @conn: iscsi connection
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}
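
/*
 * A non-zero qlen means the current out.segment has not been fully pushed
 * to the socket yet; iscsi_sw_tcp_pdu_xmit() below keeps calling
 * iscsi_sw_tcp_xmit() until the queue drains or an error occurs.
 */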
static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	unsigned int noreclaim_flag;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	int rc = 0;

	if (!tcp_sw_conn->sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "Transport not bound to socket!\n");
		return -EINVAL;
	}

	noreclaim_flag = memalloc_noreclaim_save();

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0) {
			rc = -EAGAIN;
			break;
		}
		if (rc < 0)
			break;
		rc = 0;
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return rc;
}
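
/*
 * The memalloc_noreclaim_save/restore pair above runs the xmit path with
 * PF_MEMALLOC set; the usual reason is to let network allocations on this
 * path dip into reserves instead of recursing into reclaim that may itself
 * depend on iSCSI I/O (e.g. when swapping over iSCSI).
 */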
/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}
static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}
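
/*
 * Layout produced above: out.segment covers the BHS (plus the header
 * digest appended right behind it when enabled), while out.data_segment is
 * filled in separately by the data prep helpers below;
 * iscsi_sw_tcp_send_hdr_done() then swaps the data segment in once the
 * header has been sent.
 */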
/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said it would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}
static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said it would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				  data, len, NULL, tx_hash);
}
static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = &task->sc->sdb;

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}
static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}
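
/*
 * dd_data layout relied on here: the iscsi_tcp_task structure is followed
 * immediately by an iscsi_sw_tcp_hdrbuf (see the per-task size passed to
 * iscsi_session_setup() below), so task->hdr simply points past the
 * tcp_task; hdr_max leaves room for a trailing header digest.
 */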
static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct crypto_ahash *tfm;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;
	INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
	tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;

	mutex_init(&tcp_sw_conn->sock_lock);

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto free_conn;

	tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->tx_hash)
		goto free_tfm;
	ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);

	tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->rx_hash)
		goto free_tx_hash;
	ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);

	tcp_conn->rx_hash = tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_hash:
	ahash_request_free(tcp_sw_conn->tx_hash);
free_tfm:
	crypto_free_ahash(tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}
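
/*
 * Both digest engines share one crc32c ahash transform: tx_hash is used
 * when building outgoing header/data digests and rx_hash is handed to
 * libiscsi_tcp via tcp_conn->rx_hash for verifying incoming PDUs.
 */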
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/*
	 * The iscsi transport class will make sure we are not called in
	 * parallel with start, stop, bind and destroys. However, this can be
	 * called twice if userspace does a stop then a destroy.
	 */
	if (!sock)
		return;

	/*
	 * Make sure we start socket shutdown now in case userspace is up
	 * but delayed in releasing the socket.
	 */
	kernel_sock_shutdown(sock, SHUT_RDWR);

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(conn);
	sock_put(sock->sk);

	iscsi_suspend_rx(conn);

	mutex_lock(&tcp_sw_conn->sock_lock);
	tcp_sw_conn->sock = NULL;
	mutex_unlock(&tcp_sw_conn->sock_lock);
	sockfd_put(sock);
}
static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	ahash_request_free(tcp_sw_conn->rx_hash);
	if (tcp_sw_conn->tx_hash) {
		struct crypto_ahash *tfm;

		tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
		ahash_request_free(tcp_sw_conn->tx_hash);
		crypto_free_ahash(tfm);
	}

	iscsi_tcp_conn_teardown(cls_conn);
}
static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	sock->sk->sk_err = EIO;
	wake_up_interruptible(sk_sleep(sock->sk));

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}
static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	mutex_lock(&tcp_sw_conn->sock_lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	mutex_unlock(&tcp_sw_conn->sock_lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = SK_CAN_REUSE;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;
	sk->sk_use_task_frag = false;
	sk_set_memalloc(sk);
	sock_no_linger(sk);

	iscsi_sw_tcp_conn_set_callbacks(conn);
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}
static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		mutex_lock(&tcp_sw_conn->sock_lock);
		if (!tcp_sw_conn->sock) {
			mutex_unlock(&tcp_sw_conn->sock_lock);
			return -ENOTCONN;
		}
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		mutex_unlock(&tcp_sw_conn->sock_lock);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}
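
/*
 * Switching to sock_no_sendpage when the data digest is enabled avoids
 * zero-copy transmit; the usual rationale is that the CRC is computed over
 * the payload before it is sent, so pages must not be handed to the stack
 * where they could still change after the digest was calculated.
 */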
static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct sockaddr_in6 addr;
	struct socket *sock;
	int rc;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
	case ISCSI_PARAM_LOCAL_PORT:
		spin_lock_bh(&conn->session->frwd_lock);
		if (!conn->session->leadconn) {
			spin_unlock_bh(&conn->session->frwd_lock);
			return -ENOTCONN;
		}
		/*
		 * The conn has been setup and bound, so just grab a ref
		 * in case a destroy runs while we are in the net layer.
		 */
		iscsi_get_conn(conn->cls_conn);
		spin_unlock_bh(&conn->session->frwd_lock);

		tcp_conn = conn->dd_data;
		tcp_sw_conn = tcp_conn->dd_data;

		mutex_lock(&tcp_sw_conn->sock_lock);
		sock = tcp_sw_conn->sock;
		if (!sock) {
			rc = -ENOTCONN;
			goto sock_unlock;
		}

		if (param == ISCSI_PARAM_LOCAL_PORT)
			rc = kernel_getsockname(sock,
						(struct sockaddr *)&addr);
		else
			rc = kernel_getpeername(sock,
						(struct sockaddr *)&addr);
sock_unlock:
		mutex_unlock(&tcp_sw_conn->sock_lock);
		iscsi_put_conn(conn->cls_conn);
		if (rc < 0)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}
static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	struct socket *sock;
	int rc;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		session = tcp_sw_host->session;
		if (!session)
			return -ENOTCONN;

		spin_lock_bh(&session->frwd_lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->frwd_lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;
		tcp_sw_conn = tcp_conn->dd_data;
		/*
		 * The conn has been setup and bound, so just grab a ref
		 * in case a destroy runs while we are in the net layer.
		 */
		iscsi_get_conn(conn->cls_conn);
		spin_unlock_bh(&session->frwd_lock);

		mutex_lock(&tcp_sw_conn->sock_lock);
		sock = tcp_sw_conn->sock;
		if (!sock) {
			rc = -ENOTCONN;
			goto sock_unlock;
		}
		rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
sock_unlock:
		mutex_unlock(&tcp_sw_conn->sock_lock);
		iscsi_put_conn(conn->cls_conn);
		if (rc < 0)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr,
						 (enum iscsi_param)param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}
static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}
static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;
	int rc;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
	if (rc < 0)
		goto free_host;
	shost->can_queue = rc;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;

	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	/* We are now fully setup so expose the session to sysfs. */
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost, false);
free_host:
	iscsi_host_free(shost);
	return NULL;
}
static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = cls_session->dd_data;

	if (WARN_ON_ONCE(session->leadconn))
		return;

	iscsi_session_remove(cls_session);
	/*
	 * Our get_host_param needs to access the session, so remove the
	 * host from sysfs before freeing the session to make sure userspace
	 * is no longer accessing the callout.
	 */
	iscsi_host_remove(shost, false);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);

	iscsi_session_free(cls_session);
	iscsi_host_free(shost);
}
static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
	struct iscsi_session *session = tcp_sw_host->session;
	struct iscsi_conn *conn = session->leadconn;

	if (conn->datadgst_en)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   sdev->request_queue);
	blk_queue_dma_alignment(sdev->request_queue, 0);
	return 0;
}
static const struct scsi_host_template iscsi_sw_tcp_sht = {
	.module			= THIS_MODULE,
	.name			= "iSCSI Initiator over TCP/IP",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.can_queue		= ISCSI_TOTAL_CMDS_MAX,
	.sg_tablesize		= 4096,
	.max_sectors		= 0xFFFF,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out		= iscsi_eh_cmd_timed_out,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.dma_boundary		= PAGE_SIZE - 1,
	.slave_configure	= iscsi_sw_tcp_slave_configure,
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
	.track_queue_depth	= 1,
	.cmd_size		= sizeof(struct iscsi_cmd),
};
static struct iscsi_transport iscsi_sw_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	/* session management */
	.create_session		= iscsi_sw_tcp_session_create,
	.destroy_session	= iscsi_sw_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_sw_tcp_conn_create,
	.bind_conn		= iscsi_sw_tcp_conn_bind,
	.destroy_conn		= iscsi_sw_tcp_conn_destroy,
	.attr_is_visible	= iscsi_sw_tcp_attr_is_visible,
	.set_param		= iscsi_sw_tcp_conn_set_param,
	.get_conn_param		= iscsi_sw_tcp_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_sw_tcp_conn_stop,
	/* iscsi host params */
	.get_host_param		= iscsi_sw_tcp_host_get_param,
	.set_host_param		= iscsi_host_set_param,
	/* IO */
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_sw_tcp_conn_get_stats,
	/* iscsi task/cmd helpers */
	.init_task		= iscsi_tcp_task_init,
	.xmit_task		= iscsi_tcp_task_xmit,
	.cleanup_task		= iscsi_tcp_cleanup_task,
	/* low level pdu helpers */
	.xmit_pdu		= iscsi_sw_tcp_pdu_xmit,
	.init_pdu		= iscsi_sw_tcp_pdu_init,
	.alloc_pdu		= iscsi_sw_tcp_pdu_alloc,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
static int __init iscsi_sw_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}

	iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
						&iscsi_sw_tcp_transport);
	if (!iscsi_sw_tcp_scsi_transport)
		return -ENODEV;

	return 0;
}

static void __exit iscsi_sw_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_sw_tcp_transport);
}

module_init(iscsi_sw_tcp_init);
module_exit(iscsi_sw_tcp_exit);