struct lpfc_pde6 *pde6 = NULL;
struct lpfc_pde7 *pde7 = NULL;
dma_addr_t dataphysaddr, protphysaddr;
- unsigned short curr_data = 0, curr_prot = 0;
+ unsigned short curr_prot = 0;
unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
num_bde++;
- curr_data++;
if (split_offset)
break;
struct scatterlist *sgpe = NULL; /* s/g prot entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t dataphysaddr, protphysaddr;
- unsigned short curr_data = 0, curr_prot = 0;
+ unsigned short curr_prot = 0;
unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
dma_offset += dma_len;
num_sge++;
- curr_data++;
if (split_offset) {
sgl++;
}
}
+inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ /*
+ * Unlocking an irq is one of the entry points to check
+ * for re-schedule, but we are fine on the io submission
+ * path as the midlayer does a get_cpu to glue us in. Flush
+ * out the invalidate queue so we can see the updated
+ * value for the flag.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+ /* We will likely not get the completion for the caller
+ * during this iteration, but that's fine.
+ * Future io's coming on this eq should be able to
+ * pick it up. As for the case of single io's, they
+ * will be handled through a schedule from the polling
+ * timer function, which currently fires every 1 msec.
+ */
+ lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+}
+
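For context on the barrier the comment above refers to: the smp_rmb() on the submission path only pays off if whichever path flips eq->mode to LPFC_EQ_POLL publishes that store with a matching write barrier. A minimal, illustrative sketch of that writer side, assuming the usual lpfc/kernel headers; the helper name below is hypothetical and not part of this patch:

/* Illustrative writer-side sketch only: switch an EQ to polling mode
 * and publish the new mode before submission-path readers call
 * lpfc_sli4_poll_eq().
 */
static void lpfc_sli4_eq_set_poll_mode_sketch(struct lpfc_queue *eq)
{
	WRITE_ONCE(eq->mode, LPFC_EQ_POLL);
	smp_wmb();	/* pairs with the smp_rmb() in lpfc_sli4_poll_eq() */
}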
/**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(eq);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
{
struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
struct lpfc_queue *eq;
- int i = 0;
rcu_read_lock();
list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
- i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ lpfc_sli4_poll_eq(eq);
if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
rcu_read_unlock();
}
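The heartbeat callback above only re-arms itself while phba->poll_list is non-empty, so the timer has to be registered once and kicked when the first EQ is added to the list. A minimal sketch of that setup, assuming the callback is registered with timer_setup() against phba->cpuhp_poll_timer; the helper names below are illustrative, not the driver's actual entry points:

/* Illustrative only: register the poll heartbeat callback at init time
 * and arm it when the poll list goes from empty to non-empty.
 */
static void lpfc_sli4_poll_timer_init_sketch(struct lpfc_hba *phba)
{
	/* "lpfc_sli4_poll_hbtimer" stands in for the callback shown above. */
	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
}

static void lpfc_sli4_poll_timer_kick_sketch(struct lpfc_hba *phba)
{
	/* Start the 1 msec heartbeat if it is not already running. */
	if (!timer_pending(&phba->cpuhp_poll_timer))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
}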
-inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
-{
- struct lpfc_hba *phba = eq->phba;
- int i = 0;
-
- /*
- * Unlocking an irq is one of the entry point to check
- * for re-schedule, but we are good for io submission
- * path as midlayer does a get_cpu to glue us in. Flush
- * out the invalidate queue so we can see the updated
- * value for flag.
- */
- smp_rmb();
-
- if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
- /* We will not likely get the completion for the caller
- * during this iteration but i guess that's fine.
- * Future io's coming on this eq should be able to
- * pick it up. As for the case of single io's, they
- * will be handled through a sched from polling timer
- * function which is currently triggered every 1msec.
- */
- i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
-
- return i;
-}
-
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
struct lpfc_hba *phba = eq->phba;
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
return WQE_ERROR;