#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
-#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16
-#define MAX_DEV_CMD_ENTRIES 2
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
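/*
 * Note: UFS_MCQ_NUM_DEV_CMD_QUEUES and MAX_DEV_CMD_ENTRIES are dropped
 * because device commands no longer get a dedicated hardware queue;
 * with the host-wide shared tag set (host_tagset below) they are
 * issued through uhq[0] alongside regular I/O.
 */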
u32 utag = blk_mq_unique_tag(req);
u32 hwq = blk_mq_unique_tag_to_hwq(utag);
- /* uhq[0] is used to serve device commands */
- return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+ return &hba->uhq[hwq];
}
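/*
 * The unique tag returned by blk_mq_unique_tag() packs the hardware
 * queue index into its upper 16 bits (BLK_MQ_UNIQUE_TAG_BITS), i.e.
 * utag = (hwq << 16) | (tag & 0xffff), and blk_mq_unique_tag_to_hwq()
 * simply shifts it back out. With the dev-cmd offset gone, that index
 * now maps 1:1 onto hba->uhq[].
 */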
hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
- tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
- rw_queues;
+ tot_queues = read_queues + poll_queues + rw_queues;
if (hba_maxq < tot_queues) {
dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
	tot_queues, hba_maxq);
return -EOPNOTSUPP;
}
- rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ rem = hba_maxq;
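/*
 * rem now starts from the controller's full capacity: no queue is set
 * aside for device commands, so every queue reported via MAX_QUEUE_SUP
 * can be handed out to the rw/read/poll groups below.
 */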
if (rw_queues) {
hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
for (i = 0; i < HCTX_MAX_TYPES; i++)
host->nr_hw_queues += hba->nr_queues[i];
- hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ hba->nr_hw_queues = host->nr_hw_queues;
return 0;
}
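/*
 * hba->nr_hw_queues now equals the SCSI host's nr_hw_queues; the old
 * "+1" accounted for the hidden dev-cmd queue that the block layer
 * never saw.
 */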
/* The very first HW queue serves device commands */
hba->dev_cmd_queue = &hba->uhq[0];
- /* Give dev_cmd_queue the minimal number of entries */
- hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
host->host_tagset = 1;
return 0;
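/*
 * host_tagset = 1 makes the SCSI midlayer share one blk-mq tag set
 * across all hardware queues, so a tag value is unique host-wide.
 * That is what lets uhq[0] double as both the device-command queue
 * and a regular I/O queue without a private tag space of its own.
 */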
struct ufs_hw_queue *hwq;
if (is_mcq_enabled(hba)) {
- hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+ hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
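/*
 * In the MCQ polling path, the queue number handed in by the block
 * layer likewise indexes uhq[] directly now; ufshcd_mcq_poll_cqe_lock()
 * drains that queue's completion entries under its CQ lock.
 */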
utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq_num = blk_mq_unique_tag_to_hwq(utag);
- hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+ hwq = &hba->uhq[hwq_num];
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);