#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
+#include <linux/semaphore.h>
#include "isert_proto.h"
#include "ib_isert.h"
struct isert_cq_desc *cq_desc;
struct ib_device_attr *dev_attr;
int ret = 0, i, j;
+ int max_rx_cqe, max_tx_cqe;
dev_attr = &device->dev_attr;
ret = isert_query_device(ib_dev, dev_attr);
if (ret)
return ret;
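+ /* Clamp the RX/TX CQ depths to the max_cqe the device reports */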
+ max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
+ max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+
/* assign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
device->use_fastreg = 1;
isert_cq_rx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
- ISER_MAX_RX_CQ_LEN, i);
+ max_rx_cqe, i);
if (IS_ERR(device->dev_rx_cq[i])) {
ret = PTR_ERR(device->dev_rx_cq[i]);
device->dev_rx_cq[i] = NULL;
isert_cq_tx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
- ISER_MAX_TX_CQ_LEN, i);
+ max_tx_cqe, i);
if (IS_ERR(device->dev_tx_cq[i])) {
ret = PTR_ERR(device->dev_tx_cq[i]);
device->dev_tx_cq[i] = NULL;
{
struct fast_reg_descriptor *fr_desc;
struct isert_device *device = isert_conn->conn_device;
- int i, ret;
+ struct se_session *se_sess = isert_conn->conn->sess->se_sess;
+ struct se_node_acl *se_nacl = se_sess->se_node_acl;
+ int i, ret, tag_num;
+ /*
+ * Set up the number of FRMRs based upon the number of tags
+ * available to the session in iscsi_target_locate_portal().
+ */
+ tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
+ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
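+ /* e.g. an assumed queue_depth of 32 (above ISCSIT_MIN_TAGS) gives (32 * 2) + ISCSIT_EXTRA_TAGS descriptors */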
- INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
isert_conn->conn_fr_pool_size = 0;
- for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+ for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
pr_err("Failed to allocate fast_reg descriptor\n");
struct ib_device *ib_dev = cma_id->device;
int ret = 0;
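+ /* A connect request can race portal setup; reject anything arriving before the np is enabled */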
+ spin_lock_bh(&np->np_thread_lock);
+ if (!np->enabled) {
+ spin_unlock_bh(&np->np_thread_lock);
+ pr_debug("iscsi_np is not enabled, reject connect request\n");
+ return rdma_reject(cma_id, NULL, 0);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
cma_id, cma_id->context);
init_completion(&isert_conn->conn_wait);
init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
- kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
spin_lock_init(&isert_conn->conn_lock);
+ INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
goto out_mr;
}
- if (device->use_fastreg) {
- ret = isert_conn_create_fastreg_pool(isert_conn);
- if (ret) {
- pr_err("Conn: %p failed to create fastreg pool\n",
- isert_conn);
- goto out_fastreg;
- }
- }
-
ret = isert_conn_setup_qp(isert_conn, cma_id);
if (ret)
goto out_conn_dev;
mutex_lock(&isert_np->np_accept_mutex);
- list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
+ list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
- pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
- wake_up(&isert_np->np_accept_wq);
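+ /* Signal isert_accept_np(), blocked in down_interruptible() on np_sem, that a new connection is on np_accept_list */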
+ pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+ up(&isert_np->np_sem);
return 0;
out_conn_dev:
- if (device->use_fastreg)
- isert_conn_free_fastreg_pool(isert_conn);
-out_fastreg:
ib_dereg_mr(isert_conn->conn_mr);
out_mr:
ib_dealloc_pd(isert_conn->conn_pd);
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
- return;
+ struct isert_conn *isert_conn = cma_id->context;
+
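+ /* Take a reference for the established connection; isert_wait_conn() drops it via isert_put_conn() */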
+ kref_get(&isert_conn->conn_kref);
}
static void
isert_put_conn(isert_conn);
return;
}
- if (!isert_conn->logout_posted) {
- pr_debug("Calling rdma_disconnect for !logout_posted from"
- " isert_disconnect_work\n");
+
+ if (isert_conn->disconnect) {
+ /* Send DREQ/DREP towards our initiator */
rdma_disconnect(isert_conn->conn_cm_id);
- mutex_unlock(&isert_conn->conn_mutex);
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
- goto wake_up;
}
+
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
complete(&isert_conn->conn_wait);
- isert_put_conn(isert_conn);
}
static void
-isert_disconnected_handler(struct rdma_cm_id *cma_id)
+isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
{
struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+ isert_conn->disconnect = disconnect;
INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
schedule_work(&isert_conn->conn_logout_work);
}
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
+ bool disconnect = false;
pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id);
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
- pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
ret = isert_connect_request(cma_id, event);
break;
case RDMA_CM_EVENT_ESTABLISHED:
- pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
isert_connected_handler(cma_id);
break;
- case RDMA_CM_EVENT_DISCONNECTED:
- pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
- isert_disconnected_handler(cma_id);
- break;
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- case RDMA_CM_EVENT_ADDR_CHANGE:
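+ /*
+  * ADDR_CHANGE/DISCONNECTED/DEVICE_REMOVAL still want an explicit
+  * rdma_disconnect() towards the initiator; for TIMEWAIT_EXIT the CM
+  * connection is already gone, so disconnect stays false.
+  */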
+ case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
+ case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+ disconnect = true;
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
+ isert_disconnected_handler(cma_id, disconnect);
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
default:
- pr_err("Unknown RDMA CMA event: %d\n", event->event);
+ pr_err("Unhandled RDMA CMA event: %d\n", event->event);
break;
}
}
if (!login->login_failed) {
if (login->login_complete) {
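+ /*
+  * The FRMR pool is created here, after login completes, so that
+  * isert_conn_create_fastreg_pool() can size it from the session's
+  * tag count.
+  */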
+ if (isert_conn->conn_device->use_fastreg) {
+ ret = isert_conn_create_fastreg_pool(isert_conn);
+ if (ret) {
+ pr_err("Conn: %p failed to create"
+ " fastreg pool\n", isert_conn);
+ return ret;
+ }
+ }
+
ret = isert_alloc_rx_descriptors(isert_conn);
if (ret)
return ret;
if (!rc && dump_payload == false && unsol_data)
iscsit_set_unsoliticed_dataout(cmd);
+ else if (dump_payload && imm_data)
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
return 0;
}
}
static void
-isert_put_cmd(struct isert_cmd *isert_cmd)
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
- if (cmd->data_direction == DMA_TO_DEVICE)
+ if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
+ /*
+ * Check for special case during comp_err where
+ * WRITE_PENDING has been handed off from core,
+ * but requires an extra target_put_sess_cmd()
+ * before transport_generic_free_cmd() below.
+ */
+ if (comp_err &&
+ cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ }
+ }
device->unreg_rdma_mem(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
- struct ib_device *ib_dev)
+ struct ib_device *ib_dev, bool comp_err)
{
if (isert_cmd->pdu_buf_dma != 0) {
pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
}
isert_unmap_tx_desc(tx_desc, ib_dev);
- isert_put_cmd(isert_cmd);
+ isert_put_cmd(isert_cmd, comp_err);
}
static void
iscsit_tmr_post_handler(cmd, cmd->conn);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
case ISTATE_SEND_REJECT:
pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
atomic_dec(&isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
- /*
- * Call atomic_dec(&isert_conn->post_send_buf_count)
- * from isert_wait_conn()
- */
- isert_conn->logout_posted = true;
+
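+ /* Drop post_send_buf_count for the logout response here, instead of deferring it to isert_wait_conn() via logout_posted */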
+ atomic_dec(&isert_conn->post_send_buf_count);
iscsit_logout_post_handler(cmd, cmd->conn);
break;
case ISTATE_SEND_TEXTRSP:
atomic_dec(&isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
default:
pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
wr = &t->isert_cmd->rdma_wr;
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
- isert_completion_put(t, t->isert_cmd, ib_dev);
+ isert_completion_put(t, t->isert_cmd, ib_dev, true);
}
}
wr = &t->isert_cmd->rdma_wr;
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
- isert_completion_put(t, t->isert_cmd, ib_dev);
+ isert_completion_put(t, t->isert_cmd, ib_dev, true);
}
tx_desc->comp_llnode_batch = NULL;
if (!isert_cmd)
isert_unmap_tx_desc(tx_desc, ib_dev);
else
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}
static void
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->conn_mutex);
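+ /* Kick connection reinstatement here; it is no longer done from isert_disconnect_work() */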
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+
complete(&isert_conn->conn_wait_comp_err);
}
int rc;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- rc = iscsit_build_text_rsp(cmd, conn, hdr);
+ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
if (rc < 0)
return rc;
pr_err("Unable to allocate struct isert_np\n");
return -ENOMEM;
}
- init_waitqueue_head(&isert_np->np_accept_wq);
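+ /* np_sem counts connect requests queued on np_accept_list: isert_connect_request() ups it, isert_accept_np() downs it */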
+ sema_init(&isert_np->np_sem, 0);
mutex_init(&isert_np->np_accept_mutex);
INIT_LIST_HEAD(&isert_np->np_accept_list);
init_completion(&isert_np->np_login_comp);
}
static int
-isert_check_accept_queue(struct isert_np *isert_np)
-{
- int empty;
-
- mutex_lock(&isert_np->np_accept_mutex);
- empty = list_empty(&isert_np->np_accept_list);
- mutex_unlock(&isert_np->np_accept_mutex);
-
- return empty;
-}
-
-static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
int max_accept = 0, ret;
accept_wait:
- ret = wait_event_interruptible(isert_np->np_accept_wq,
- !isert_check_accept_queue(isert_np) ||
- np->np_thread_state == ISCSI_NP_THREAD_RESET);
+ ret = down_interruptible(&isert_np->np_sem);
if (max_accept > 5)
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+ pr_debug("np_thread_state %d for isert_accept_np\n",
+ np->np_thread_state);
+ /**
+ * No point in stalling here when np_thread
+ * is in state RESET/SHUTDOWN/EXIT - bail
+ **/
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
struct isert_conn *isert_conn = conn->context;
pr_debug("isert_wait_conn: Starting \n");
- /*
- * Decrement post_send_buf_count for special case when called
- * from isert_do_control_comp() -> iscsit_logout_post_handler()
- */
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->logout_posted)
- atomic_dec(&isert_conn->post_send_buf_count);
- if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+ mutex_lock(&isert_conn->conn_mutex);
+ if (isert_conn->conn_cm_id) {
pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
wait_for_completion(&isert_conn->conn_wait_comp_err);
wait_for_completion(&isert_conn->conn_wait);
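+ /* Release the reference taken in isert_connected_handler() */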
+ isert_put_conn(isert_conn);
}
static void isert_free_conn(struct iscsi_conn *conn)
static void __exit isert_exit(void)
{
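+ /* Wait for any still-queued conn_logout_work before destroying the workqueues and unregistering the transport */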
+ flush_scheduled_work();
destroy_workqueue(isert_comp_wq);
destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport);