ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)
struct nvmet_port *port = to_nvmet_port(item);
/* Let inflight controllers teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
list_del(&port->global_entry);
kfree(port->ana_state);
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
/*
* This read/write semaphore is used to synchronize access to configuration
* information on a target system that will result in discovery log page
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
- schedule_work(&ctrl->fatal_err_work);
+ queue_work(nvmet_wq, &ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
}
goto out_free_zbd_work_queue;
}
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ if (!nvmet_wq) {
+ error = -ENOMEM;
+ goto out_free_buffered_work_queue;
+ }
+
error = nvmet_init_discovery();
if (error)
- goto out_free_work_queue;
+ goto out_free_nvmet_work_queue;
error = nvmet_init_configfs();
if (error)
out_exit_discovery:
nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+ destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
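
The core.c hunks above follow the usual module-scoped workqueue lifecycle: allocate the queue with WQ_MEM_RECLAIM in the init path, unwind in reverse order on error, and destroy it in the exit path. A minimal standalone sketch of that pattern, with illustrative names (the real code uses nvmet_wq inside nvmet_init()/nvmet_exit()):

/*
 * Sketch of the private-workqueue lifecycle shown in the core.c hunks.
 * "example_wq" and the init/exit function names are illustrative only.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees a rescuer thread so queued work can make
	 * forward progress under memory pressure.
	 */
	example_wq = alloc_workqueue("example-wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* Drains any remaining work items, then frees the workqueue. */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
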
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
return;
iod->rqstdatalen = lsreqbuf_len;
iod->hosthandle = hosthandle;
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return 0;
}
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
- schedule_work(&rport->ls_work);
+ queue_work(nvmet_wq, &rport->ls_work);
return ret;
}
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
- schedule_work(&rport->ls_work);
+ queue_work(nvmet_wq, &rport->ls_work);
}
return 0;
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
- schedule_work(&tport->ls_work);
+ queue_work(nvmet_wq, &tport->ls_work);
return ret;
}
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
- schedule_work(&tport->ls_work);
+ queue_work(nvmet_wq, &tport->ls_work);
}
return 0;
tgt_rscn->tport = tgtport->private;
INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
- schedule_work(&tgt_rscn->work);
+ queue_work(nvmet_wq, &tgt_rscn->work);
}
static void
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
kref_init(&tfcp_req->ref);
- schedule_work(&tfcp_req->fcp_rcv_work);
+ queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
return 0;
}
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->tio_done_work);
+ queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}
static void
if (abortio)
/* leave the reference while the work item is scheduled */
- WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
else {
/*
* as the io has already had the done callback made,
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_execute_discard(struct nvmet_req *req)
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_write_zeroes_work(struct work_struct *w)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return BLK_STS_OK;
}
return;
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
}
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
if (req->p.use_workqueue || effects) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
- schedule_work(&req->p.work);
+ queue_work(nvmet_wq, &req->p.work);
} else {
rq->end_io_data = req;
blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (disconnect) {
rdma_disconnect(queue->cm_id);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
}
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
/**
if (!queue) {
struct nvmet_rdma_port *port = cm_id->context;
- schedule_delayed_work(&port->repair_work, 0);
+ queue_delayed_work(nvmet_wq, &port->repair_work, 0);
break;
}
fallthrough;
nvmet_rdma_disable_port(port);
ret = nvmet_rdma_enable_port(port);
if (ret)
- schedule_delayed_work(&port->repair_work, 5 * HZ);
+ queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
}
static int nvmet_rdma_add_port(struct nvmet_port *nport)
}
mutex_unlock(&nvmet_rdma_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
static struct ib_client nvmet_rdma_ib_client = {
spin_lock(&queue->state_lock);
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING;
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
spin_unlock(&queue->state_lock);
}
goto out;
if (sk->sk_state == TCP_LISTEN)
- schedule_work(&port->accept_work);
+ queue_work(nvmet_wq, &port->accept_work);
out:
read_unlock_bh(&sk->sk_callback_lock);
}
if (sq->qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
queue->nr_cmds = sq->size * 2;
nvmet_unregister_transport(&nvmet_tcp_ops);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
}
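
Taken together, the conversions replace the system-workqueue helpers with their targeted counterparts on nvmet_wq. A rough correspondence, using an illustrative work item rather than the nvmet structures:

/*
 * Illustrative mapping of the replaced helpers; "my_work"/"my_dwork"
 * stand in for the INIT_WORK()/INIT_DELAYED_WORK() items in the hunks above.
 */
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *w) { }
static DECLARE_WORK(my_work, my_work_fn);
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void example_usage(struct workqueue_struct *wq)
{
	queue_work(wq, &my_work);              /* was schedule_work(&my_work) */
	queue_delayed_work(wq, &my_dwork, HZ); /* was schedule_delayed_work(&my_dwork, HZ) */
	flush_workqueue(wq);                   /* was flush_scheduled_work() */
}

Flushing only the private queue means the teardown paths wait solely on nvmet's own work items rather than on whatever else happens to be pending on the system workqueue, which is presumably why every flush_scheduled_work() call site becomes flush_workqueue(nvmet_wq).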