#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
SKD_REQ_STATE_TIMEOUT,
};
-enum skd_fit_msg_state {
- SKD_MSG_STATE_IDLE,
- SKD_MSG_STATE_BUSY,
-};
-
enum skd_check_status_action {
SKD_CHECK_STATUS_REPORT_GOOD,
SKD_CHECK_STATUS_REPORT_SMART_ALERT,
};
struct skd_fitmsg_context {
- enum skd_fit_msg_state state;
-
- struct skd_fitmsg_context *next;
-
u32 id;
- u16 outstanding;
u32 length;
struct skd_request_context {
enum skd_req_state state;
- struct skd_request_context *next;
-
u16 id;
u32 fitmsg_id;
u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
u32 timeout_stamp;
- struct skd_fitmsg_context *skmsg_free_list;
struct skd_fitmsg_context *skmsg_table;
- struct skd_request_context *skreq_free_list;
struct skd_request_context *skreq_table;
struct skd_special_context internal_skspcl;
static void skd_send_special_fitmsg(struct skd_device *skdev,
struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t status);
+static void skd_end_request(struct skd_device *skdev, struct request *req,
+ blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event);
req = blk_peek_request(q);
if (req == NULL)
break;
- blk_start_request(req);
+ WARN_ON_ONCE(blk_queue_start_tag(q, req));
__blk_end_request_all(req, BLK_STS_IOERR);
}
}
u64 cmdctxt;
u32 timo_slot;
int flush, fua;
+ u32 tag;
if (skdev->state != SKD_DRVR_STATE_ONLINE) {
if (skd_fail_all(q))
}
if (blk_queue_stopped(skdev->queue)) {
- if (skdev->skmsg_free_list == NULL ||
- skdev->skreq_free_list == NULL ||
- skdev->in_flight >= skdev->queue_low_water_mark)
+ if (skdev->in_flight >= skdev->queue_low_water_mark)
/* There is still some kind of shortage */
return;
break;
}
- /* Is a skd_request_context available? */
- skreq = skdev->skreq_free_list;
- if (skreq == NULL) {
- dev_dbg(&skdev->pdev->dev, "Out of req=%p\n", q);
- break;
- }
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
-
- /* Now we check to see if we can get a fit msg */
- if (skmsg == NULL) {
- if (skdev->skmsg_free_list == NULL) {
- dev_dbg(&skdev->pdev->dev, "Out of msg\n");
- break;
- }
- }
-
- skreq->flush_cmd = 0;
- skreq->n_sg = 0;
- skreq->sg_byte_count = 0;
-
/*
* OK to now dequeue request from q.
*
* At this point we are committed to either start or reject
* the native request. Note that skd_request_context is
* available but is still at the head of the free list.
*/
- blk_start_request(req);
+ WARN_ON_ONCE(blk_queue_start_tag(q, req));
+
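+ /*
+ * The unique block layer tag is used to index both skreq_table and
+ * skmsg_table, and is encoded in skreq->id so that a completion can
+ * be mapped back to its request context.
+ */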
+ tag = blk_mq_unique_tag(req);
+ WARN_ONCE(tag >= skd_max_queue_depth,
+ "%#x >= %#x (nr_requests = %lu)\n", tag,
+ skd_max_queue_depth, q->nr_requests);
+
+ skreq = &skdev->skreq_table[tag];
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
+ SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
+
+ skreq->id = tag + SKD_ID_RW_REQUEST;
+ skreq->flush_cmd = 0;
+ skreq->n_sg = 0;
+ skreq->sg_byte_count = 0;
+
skreq->req = req;
skreq->fitmsg_id = 0;
if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
dev_dbg(&skdev->pdev->dev, "error Out\n");
- skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
+ skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
continue;
}
/* Either a FIT msg is in progress or we have to start one. */
if (skmsg == NULL) {
- /* Are there any FIT msg buffers available? */
- skmsg = skdev->skmsg_free_list;
- if (skmsg == NULL) {
- dev_dbg(&skdev->pdev->dev,
- "Out of msg skdev=%p\n",
- skdev);
- break;
- }
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
-
- skdev->skmsg_free_list = skmsg->next;
-
- skmsg->state = SKD_MSG_STATE_BUSY;
- skmsg->id += SKD_ID_INCR;
+ skmsg = &skdev->skmsg_table[tag];
/* Initialize the FIT msg header */
fmh = &skmsg->msg_buf->fmh;
cpu_to_be32(skreq->sg_byte_count);
/* Complete resource allocations. */
- skdev->skreq_free_list = skreq->next;
skreq->state = SKD_REQ_STATE_BUSY;
skreq->id += SKD_ID_INCR;
blk_stop_queue(skdev->queue);
}
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t error)
+static void skd_end_request(struct skd_device *skdev, struct request *req,
+ blk_status_t error)
{
if (unlikely(error)) {
- struct request *req = skreq->req;
char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
u32 lba = (u32)blk_rq_pos(req);
u32 count = blk_rq_sectors(req);
dev_err(&skdev->pdev->dev,
"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
- count, skreq->id);
+ count, req->tag);
} else
- dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", skreq->id,
+ dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
error);
- __blk_end_request_all(skreq->req, error);
+ __blk_end_request_all(req, error);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_fitmsg_context *skmsg)
{
u64 qcmd;
- struct fit_msg_hdr *fmh;
dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
skmsg->mb_dma_address, skdev->in_flight);
qcmd = skmsg->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL;
- fmh = &skmsg->msg_buf->fmh;
- skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
-
if (unlikely(skdev->dbg_level > 1)) {
u8 *bp = (u8 *)skmsg->msg_buf;
int i;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
- struct skd_request_context *skreq)
+ struct skd_request_context *skreq,
+ struct request *req)
{
u8 cmp_status = skreq->completion.status;
switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
case SKD_CHECK_STATUS_REPORT_GOOD:
case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skd_end_request(skdev, skreq, BLK_STS_OK);
+ skd_end_request(skdev, req, BLK_STS_OK);
break;
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_requeue_request(skdev->queue, skreq->req);
+ blk_requeue_request(skdev->queue, req);
dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
break;
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+ if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
- blk_requeue_request(skdev->queue, skreq->req);
+ blk_requeue_request(skdev->queue, req);
break;
}
/* fall through */
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
+ skd_end_request(skdev, req, BLK_STS_IOERR);
break;
}
}
static void skd_release_skreq(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- u32 msg_slot;
- struct skd_fitmsg_context *skmsg;
-
u32 timo_slot;
/*
- * Reclaim the FIT msg buffer if this is
- * the first of the requests it carried to
- * be completed. The FIT msg buffer used to
- * send this request cannot be reused until
- * we are sure the s1120 card has copied
- * it to its memory. The FIT msg might have
- * contained several requests. As soon as
- * any of them are completed we know that
- * the entire FIT msg was transferred.
- * Only the first completed request will
- * match the FIT msg buffer id. The FIT
- * msg buffer id is immediately updated.
- * When subsequent requests complete the FIT
- * msg buffer id won't match, so we know
- * quite cheaply that it is already done.
- */
- msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
- SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
-
- skmsg = &skdev->skmsg_table[msg_slot];
- if (skmsg->id == skreq->fitmsg_id) {
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
- SKD_ASSERT(skmsg->outstanding > 0);
- skmsg->outstanding--;
- if (skmsg->outstanding == 0) {
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
- }
- }
-
- /*
* Decrease the number of active requests.
* Also decrements the count in the timeout slot.
*/
*/
skreq->state = SKD_REQ_STATE_IDLE;
skreq->id += SKD_ID_INCR;
- skreq->next = skdev->skreq_free_list;
- skdev->skreq_free_list = skreq;
+}
+
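+/*
+ * Map a block layer request back onto the skd_request_context that owns it
+ * by a linear scan of the request table.
+ */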
+static struct skd_request_context *skd_skreq_from_rq(struct skd_device *skdev,
+ struct request *rq)
+{
+ struct skd_request_context *skreq;
+ int i;
+
+ for (i = 0, skreq = skdev->skreq_table; i < skdev->num_req_context;
+ i++, skreq++)
+ if (skreq->req == rq)
+ return skreq;
+
+ return NULL;
}
static int skd_isr_completion_posted(struct skd_device *skdev,
struct fit_completion_entry_v1 *skcmp;
struct fit_comp_error_info *skerr;
u16 req_id;
- u32 req_slot;
+ u32 tag;
+ struct request *rq;
struct skd_request_context *skreq;
u16 cmp_cntxt;
u8 cmp_status;
* r/w request (see skd_start() above) or a special request.
*/
req_id = cmp_cntxt;
- req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
+ tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
/* Is this other than a r/w request? */
- if (req_slot >= skdev->num_req_context) {
+ if (tag >= skdev->num_req_context) {
/*
* This is not a completion for a r/w request.
*/
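+ /*
+ * Sanity check: a context ID outside the r/w range must never
+ * map onto an active block layer tag.
+ */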
+ WARN_ON_ONCE(blk_map_queue_find_tag(skdev->queue->queue_tags,
+ tag));
skd_complete_other(skdev, skcmp, skerr);
continue;
}
- skreq = &skdev->skreq_table[req_slot];
+ rq = blk_map_queue_find_tag(skdev->queue->queue_tags, tag);
+ if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
+ tag))
+ continue;
+ skreq = skd_skreq_from_rq(skdev, rq);
/*
* Make sure the request ID for the slot matches.
if (skreq->n_sg > 0)
skd_postop_sg_list(skdev, skreq);
- if (!skreq->req) {
- dev_dbg(&skdev->pdev->dev,
- "NULL backptr skdreq %p, req=0x%x req_id=0x%x\n",
- skreq, skreq->id, req_id);
- } else {
- /*
- * Capture the outcome and post it back to the
- * native request.
- */
- if (likely(cmp_status == SAM_STAT_GOOD))
- skd_end_request(skdev, skreq, BLK_STS_OK);
- else
- skd_resolve_req_exception(skdev, skreq);
- }
+ /* Release the skreq, its timeout slot and the queue depth. */
+ skd_release_skreq(skdev, skreq);
/*
- * Release the skreq, its FIT msg (if one), timeout slot,
- * and queue depth.
+ * Capture the outcome and post it back to the native request.
*/
- skd_release_skreq(skdev, skreq);
+ if (likely(cmp_status == SAM_STAT_GOOD))
+ skd_end_request(skdev, rq, BLK_STS_OK);
+ else
+ skd_resolve_req_exception(skdev, skreq, rq);
/* skd_isr_comp_limit equal zero means no limit */
if (limit) {
for (i = 0; i < skdev->num_req_context; i++) {
struct skd_request_context *skreq = &skdev->skreq_table[i];
+ struct request *req = skreq->req;
if (skreq->state == SKD_REQ_STATE_BUSY) {
skd_log_skreq(skdev, skreq, "recover");
SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
- SKD_ASSERT(skreq->req != NULL);
+ SKD_ASSERT(req != NULL);
/* Release DMA resources for the request. */
if (skreq->n_sg > 0)
skd_postop_sg_list(skdev, skreq);
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
-
skreq->req = NULL;
skreq->state = SKD_REQ_STATE_IDLE;
skreq->id += SKD_ID_INCR;
- }
- if (i > 0)
- skreq[-1].next = skreq;
- skreq->next = NULL;
- }
- skdev->skreq_free_list = skdev->skreq_table;
-
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
- if (skmsg->state == SKD_MSG_STATE_BUSY) {
- skd_log_skmsg(skdev, skmsg, "salvaged");
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
+ skd_end_request(skdev, req, BLK_STS_IOERR);
}
- if (i > 0)
- skmsg[-1].next = skmsg;
- skmsg->next = NULL;
}
- skdev->skmsg_free_list = skdev->skmsg_table;
for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
skdev->timeout_slot[i] = 0;
skmsg->id = i + SKD_ID_FIT_MSG;
- skmsg->state = SKD_MSG_STATE_IDLE;
skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
SKD_N_FITMSG_BYTES,
&skmsg->mb_dma_address);
"not aligned: msg_buf %p mb_dma_address %#llx\n",
skmsg->msg_buf, skmsg->mb_dma_address);
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
-
- skmsg->next = &skmsg[1];
}
- /* Free list is in order starting with the 0th entry. */
- skdev->skmsg_table[i - 1].next = NULL;
- skdev->skmsg_free_list = skdev->skmsg_table;
-
err_out:
return rc;
}
struct skd_request_context *skreq;
skreq = &skdev->skreq_table[i];
-
- skreq->id = i + SKD_ID_RW_REQUEST;
skreq->state = SKD_REQ_STATE_IDLE;
-
skreq->sg = kcalloc(skdev->sgs_per_request,
sizeof(struct scatterlist), GFP_KERNEL);
if (skreq->sg == NULL) {
rc = -ENOMEM;
goto err_out;
}
-
- skreq->next = &skreq[1];
}
- /* Free list is in order starting with the 0th entry. */
- skdev->skreq_table[i - 1].next = NULL;
- skdev->skreq_free_list = skdev->skreq_table;
-
err_out:
return rc;
}
goto err_out;
}
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
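+ /* Tags index skreq_table, so size the tag map to the full queue depth. */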
+ q->nr_requests = skd_max_queue_depth / 2;
+ blk_queue_init_tags(q, skd_max_queue_depth, NULL, BLK_TAG_ALLOC_FIFO);
skdev->queue = q;
disk->queue = q;
}
}
-static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
-{
- switch (state) {
- case SKD_MSG_STATE_IDLE:
- return "IDLE";
- case SKD_MSG_STATE_BUSY:
- return "BUSY";
- default:
- return "???";
- }
-}
-
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event)
-{
- dev_dbg(&skdev->pdev->dev, "skmsg=%p event='%s'\n", skmsg, event);
- dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x length=%d\n",
- skd_skmsg_state_to_str(skmsg->state), skmsg->state, skmsg->id,
- skmsg->length);
-}
-
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event)
{