	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct adma_port_priv *pp = ap->private_data;
-	u8 *buf = pp->pkt;
+	u8 *buf = pp->pkt, *last_buf = NULL;
	int i = (2 + buf[3]) * 8;
	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
		*(__le32 *)(buf + i) = cpu_to_le32(len);
		i += 4;
-		if (ata_sg_is_last(sg, qc))
-			pFLAGS |= pEND;
+		last_buf = &buf[i];
		buf[i++] = pFLAGS;
		buf[i++] = qc->dev->dma_mode & 0xf;
		buf[i++] = 0;	/* pPKLW */
		VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
					(unsigned long)addr, len);
	}
+
+	if (likely(last_buf))
+		*last_buf |= pEND;
+
	return i;
}
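
Every hunk in this series applies the same mechanical transformation: instead of asking ata_sg_is_last() on each pass (which forced drivers to peek at the queued command's scatterlist bookkeeping), the loop records a pointer to the element it just emitted and stamps the end-of-table marker once, after the loop, leaving ata_sg_is_last() with no remaining callers here. The likely(last_buf) guard covers commands that map zero scatter/gather entries. A minimal, self-contained sketch of the pattern, using hypothetical names (struct entry, ENTRY_END, fill_entries) that belong to no driver:

#include <stdio.h>
#include <stddef.h>

#define ENTRY_END 0x80			/* hypothetical end-of-table flag */

struct entry {
	unsigned int addr;
	unsigned int len;
	unsigned char flags;
};

/* Fill tbl[] from n (addr, len) pairs, flagging the final entry only
 * after the loop: no per-iteration "is this the last one?" test. */
static size_t fill_entries(struct entry *tbl,
			   const unsigned int (*src)[2], size_t n)
{
	struct entry *last = NULL;	/* plays the role of last_buf */
	size_t i;

	for (i = 0; i < n; i++) {
		tbl[i].addr = src[i][0];
		tbl[i].len = src[i][1];
		tbl[i].flags = 0;
		last = &tbl[i];
	}

	if (last)			/* n == 0 leaves nothing to terminate */
		last->flags |= ENTRY_END;
	return i;
}

int main(void)
{
	const unsigned int src[2][2] = { { 0x1000, 512 }, { 0x2000, 1024 } };
	struct entry tbl[2];
	size_t n = fill_entries(tbl, src, 2);

	printf("entries=%zu last.flags=0x%02x\n", n, (unsigned)tbl[n - 1].flags);
	return 0;
}

The trade is one conditional per iteration for a single pointer store, and every driver below repeats the same shape.
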
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
-static int mv_slave_config(struct scsi_device *sdev);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
-	.slave_configure = mv_slave_config,
+	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
-	.slave_configure = mv_slave_config,
+	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};
{
}
-static int mv_slave_config(struct scsi_device *sdev)
-{
-	int rc = ata_scsi_slave_config(sdev);
-	if (rc)
-		return rc;
-
-	blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);
-
-	return 0; /* scsi layer doesn't check return value, sigh */
-}
-
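
The two template hunks above retire the driver-private hook in favor of libata's stock ata_scsi_slave_config(). All the wrapper added was a blk_queue_max_phys_segments() cap on top of the generic setup, and that cap is presumably redundant because the templates already bound segments per command through their sg_tablesize field (an assumption on my part; the sg_tablesize lines are not visible in this excerpt). Note the comment deleted along with it: the SCSI layer never checked slave_configure's return value, so the wrapper could not usefully report failure anyway.
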
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
-	struct mv_sg *mv_sg;
+	struct mv_sg *mv_sg, *last_sg = NULL;
	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
			sg_len -= len;
			addr += len;
-			if (!sg_len && ata_sg_is_last(sg, qc))
-				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
-
+			last_sg = mv_sg;
			mv_sg++;
		}
-
	}
+
+	if (likely(last_sg))
+		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
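
sata_mv adds a wrinkle the others lack: the statements above sit inside an inner loop (elided here) that splits one scatterlist segment into several EPRD descriptors, which is why the old code had to test !sg_len && ata_sg_is_last() rather than ata_sg_is_last() alone. Remembering the descriptor most recently written makes that distinction irrelevant. A sketch of the interaction, with hypothetical names (MAX_CHUNK, struct desc, emit) standing in for the driver's boundary-splitting logic:

#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK 0x10000u	/* hypothetical per-descriptor length limit */

struct desc {
	uint32_t addr;
	uint32_t flags_size;
};

/* One source segment may expand into several descriptors, so "last
 * scatterlist entry" is not the same thing as "last descriptor
 * written"; tracking the last write handles the split for free. */
static struct desc *emit(struct desc *d, uint32_t addr, uint32_t sg_len,
			 struct desc **last)
{
	while (sg_len) {
		uint32_t len = sg_len > MAX_CHUNK ? MAX_CHUNK : sg_len;

		d->addr = addr;
		d->flags_size = len;
		*last = d++;
		addr += len;
		sg_len -= len;
	}
	return d;
}

int main(void)
{
	struct desc tbl[8], *last = NULL, *end;

	/* 0x18000 bytes split into two descriptors; last names the
	 * second even though both came from a single segment. */
	end = emit(tbl, 0x100000, 0x18000, &last);
	printf("descriptors=%td last=%td\n", end - tbl, last - tbl);
	return 0;
}
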
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
			  struct sil24_sge *sge)
{
	struct scatterlist *sg;
+	struct sil24_sge *last_sge = NULL;
	ata_for_each_sg(sg, qc) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->cnt = cpu_to_le32(sg_dma_len(sg));
-		if (ata_sg_is_last(sg, qc))
-			sge->flags = cpu_to_le32(SGE_TRM);
-		else
-			sge->flags = 0;
+		sge->flags = 0;
+
+		last_sge = sge;
		sge++;
	}
+
+	if (likely(last_sge))
+		last_sge->flags = cpu_to_le32(SGE_TRM);
}
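
The sil24 conversion reads a little differently from its siblings: because every pass now zeroes sge->flags, the post-loop store can use plain assignment of SGE_TRM, whereas sata_mv and ipr must OR their end flag into a field that already carries a length. The net behavior matches the old per-iteration if/else exactly, minus one branch per entry.
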
static int sil24_qc_defer(struct ata_queued_cmd *qc)
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes + qc->pad_len;
	struct scatterlist *sg;
	ata_for_each_sg(sg, qc) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));
-		if (ata_sg_is_last(sg, qc))
-			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-		else
-			ioadl++;
+
+		last_ioadl = ioadl;
+		ioadl++;
	}
+
+	if (likely(last_ioadl))
+		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
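
The ipr hunk also straightens out an asymmetric loop: ioadl used to advance only for non-final entries, so its final value depended on whether the flag had been stamped yet. With the unconditional ioadl++, the difference ioadl - ipr_cmd->ioadl is simply the number of descriptors written, and the likely(last_ioadl) test, like its siblings above, keeps a command with an empty scatterlist from dereferencing a NULL pointer.
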
/**