uint32_t cfg_fcp_io_channel;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
+ uint32_t cfg_nvme_embed_cmd;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq;
uint32_t cfg_enable_nvmet;
"Use OAS bit on NVME IOs");
/*
+ * lpfc_nvme_embed_cmd: Embed the NVME Command in the WQE for NVME/NVMET IOs
+ *
+ * 0 = Put NVME Command in SGL
+ * 1 = Embed NVME Command in WQE (unless G7)
+ * 2 = Embed NVME Command in WQE (force)
+ *
+ * Value range is [0,2]. Default value is 1.
+ */
+LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
+ "Embed NVME Command in WQE");
+
+/*
* lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
* will advertise it supports to the SCSI layer. This also will map to
* the number of WQs the driver will create.
&dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_nvme_oas,
+ &dev_attr_lpfc_nvme_embed_cmd,
&dev_attr_lpfc_auto_imax,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
+ lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
lpfc_auto_imax_init(phba, lpfc_auto_imax);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
#define lpfc_mbx_rd_rev_vpd_WORD word1
uint32_t first_hw_rev;
+#define LPFC_G7_ASIC_1 0xd
uint32_t second_hw_rev;
uint32_t word4_rsvd;
uint32_t third_hw_rev;
phba->fcp_embed_io = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
- "6422 XIB %d: FCP %d %d NVME %d %d %d\n",
+ "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
phba->fcp_embed_pbde, phba->fcp_embed_io,
phba->nvme_support, phba->nvme_embed_pbde,
- phba->cfg_suppress_rsp);
+ phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
(bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
struct lpfc_nvme_buf *lpfc_ncmd,
struct nvmefc_fcp_req *nCmd)
{
+ struct lpfc_hba *phba = vport->phba;
struct sli4_sge *sgl;
union lpfc_wqe128 *wqe;
uint32_t *wptr, *dptr;
/*
+ * Get a local pointer to the built-in wqe and correct
+ * the cmd size to match NVME's 96 bytes and fix
+ * the dma address.
+ */
+
+ /* 128 byte wqe support here */
+ wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
+
+ /*
* Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
* match NVME. NVME sends 96 bytes. Also, use the
* nvme commands command and response dma addresses
*/
sgl = lpfc_ncmd->nvme_sgl;
sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
+ if (phba->cfg_nvme_embed_cmd) {
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
+
+ /* Word 0-2 - NVME CMND IU (embedded payload) */
+ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ wqe->generic.bde.tus.f.bdeSize = 56;
+ wqe->generic.bde.addrHigh = 0;
+ wqe->generic.bde.addrLow = 64; /* Word 16 */
+ } else {
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
+
+ /* Word 0-2 - NVME CMND IU Inline BDE */
+ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
+ wqe->generic.bde.addrHigh = sgl->addr_hi;
+ wqe->generic.bde.addrLow = sgl->addr_lo;
+ }
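/*
 * Editorial note: in the non-embedded path above, both the first SGE and
 * the Word 0-2 inline BDE reference nCmd->cmddma, so the adapter fetches
 * the command IU from host memory by DMA rather than from the WQE itself.
 */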
sgl++;
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(nCmd->rsplen);
- /*
- * Get a local pointer to the built-in wqe and correct
- * the cmd size to match NVME's 96 bytes and fix
- * the dma address.
- */
-
- /* 128 byte wqe support here */
- wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
-
- /* Word 0-2 - NVME CMND IU (embedded payload) */
- wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = 56;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 64; /* Word 16 */
-
/* Word 3 */
bf_set(payload_offset_len, &wqe->fcp_icmd,
(nCmd->rsplen + nCmd->cmdlen));
/* Word 10 */
bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
+
+ if (!phba->cfg_nvme_embed_cmd) {
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 0);
+ return;
+ }
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 0);
bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
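/*
 * Editorial sketch (hypothetical constants, not part of the patch): with
 * command embedding enabled the NVME CMND IU rides as immediate data in
 * the 128-byte (32-word) WQE.  In the BDE above, addrLow = 64 is the byte
 * offset of WQE Word 16 and bdeSize = 56 bytes (14 words) covers Words
 * 16-29, which is why BUFF_TYPE_BDE_IMMED is paired with wqe_wqes = 1 and
 * wqe_dbde = 0.
 */
#define LPFC_WQE128_BYTES	128	/* 32 words per WQE */
#define LPFC_NVME_EMBED_OFFSET	64	/* byte offset of WQE Word 16 */
#define LPFC_NVME_EMBED_BYTES	56	/* immediate data, Words 16-29 */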
/*
/* Save information as VPD data */
phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
+
+ /*
+ * This is because the first G7 ASIC doesn't support the standard
+ * 0x5a NVME cmd descriptor type/subtype
+ */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) &&
+ (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
+ (phba->vpd.rev.smRev == 0) &&
+ (phba->cfg_nvme_embed_cmd == 1))
+ phba->cfg_nvme_embed_cmd = 0;
+
phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
&mqe->un.read_rev);
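/*
 * Editorial sketch (hypothetical helper, not part of the patch): the
 * first-spin G7 ASIC is identified by SLI_INTF if_type 6, biuRev equal to
 * LPFC_G7_ASIC_1 (0xd) and smRev 0.  Only the default setting (1) is
 * downgraded to SGL mode there; a forced setting (2) keeps embedding.
 */
static inline bool lpfc_is_first_g7_asic(struct lpfc_hba *phba)
{
	return bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
			LPFC_SLI_INTF_IF_TYPE_6 &&
	       phba->vpd.rev.biuRev == LPFC_G7_ASIC_1 &&
	       phba->vpd.rev.smRev == 0;
}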
wqe128->generic.bde.addrLow = 88; /* Word 22 */
bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe128->fcp_iwrite.wqe_com, 0);
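	/*
	 * Editorial note: wqe_wqes (embedded payload in the WQE) and
	 * wqe_dbde (BDE in Words 0-2 is valid) describe alternative ways
	 * of passing the command, so templates that set wqes clear dbde.
	 */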
/* Word 22-29 FCP CMND Payload */
ptr = &wqe128->words[22];
wqe128->generic.bde.addrLow = 88; /* Word 22 */
bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe128->fcp_iread.wqe_com, 0);
/* Word 22-29 FCP CMND Payload */
ptr = &wqe128->words[22];
wqe128->generic.bde.addrLow = 88; /* Word 22 */
bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe128->fcp_icmd.wqe_com, 0);
/* Word 22-29 FCP CMND Payload */
ptr = &wqe128->words[22];