	atomic_set(&port->commands[tag].active, 0);
	release_slot(port, tag);
-	up(&port->cmd_slot);
+	if (unlikely(command->unaligned))
+		up(&port->cmd_slot_unal);
+	else
+		up(&port->cmd_slot);
}
/*
 */
static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
			int nsect, int nents, int tag, void *callback,
-			void *data, int dir)
+			void *data, int dir, int unaligned)
{
	struct host_to_dev_fis *fis;
	struct mtip_port *port = dd->port;
	command->scatter_ents = nents;
+	command->unaligned = unaligned;
	/*
	 * The number of retries for this command before it is
	 * reported as a failure to the upper layers.
	fis->res3 = 0;
	fill_command_sg(dd, command, nents);
+	if (unaligned)
+		fis->device |= 1 << 7;
+
	/* Populate the command header */
	command->command_header->opts =
			__force_bit2int cpu_to_le32(
 * return value
 * None
 */
-static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
+static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag,
+						int unaligned)
{
+	struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+						&dd->port->cmd_slot;
	release_slot(dd->port, tag);
+	up(sem);
}
/*
 * or NULL if no command slots are available.
 */
static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
-						int *tag)
+						int *tag, int unaligned)
{
+	struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+						&dd->port->cmd_slot;
+
	/*
	 * It is possible that, even with this semaphore, a thread
	 * may think that no command slots are available. Therefore, we
	 * need to make an attempt to get_slot().
	 */
-	down(&dd->port->cmd_slot);
+	down(sem);
	*tag = get_slot(dd->port);
	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
-		up(&dd->port->cmd_slot);
+		up(sem);
		return NULL;
	}
	if (unlikely(*tag < 0)) {
-		up(&dd->port->cmd_slot);
+		up(sem);
		return NULL;
	}
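
For reference, here is a minimal userspace sketch of the two-semaphore slot scheme used above. It is not part of the patch: it substitutes POSIX sem_t for the kernel's struct semaphore, and ALIGNED_SLOTS/UNALIGNED_SLOTS are assumed budgets chosen only for illustration. The point it shows is that an IO always takes and later releases the budget that matches its alignment class, so small unaligned writes can never occupy more than their own share of the command slots.

/* Build with: cc -pthread slots.c */
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

#define ALIGNED_SLOTS	24	/* assumed budget for aligned IO (illustration only)   */
#define UNALIGNED_SLOTS	 8	/* assumed budget for unaligned IO (illustration only) */

static sem_t cmd_slot;		/* counts free aligned-IO slots   */
static sem_t cmd_slot_unal;	/* counts free unaligned-IO slots */

static void slots_init(void)
{
	sem_init(&cmd_slot, 0, ALIGNED_SLOTS);
	sem_init(&cmd_slot_unal, 0, UNALIGNED_SLOTS);
}

/* Block until a slot of the requested class is free. */
static void slot_get(bool unaligned)
{
	sem_wait(unaligned ? &cmd_slot_unal : &cmd_slot);
}

/* Return the slot to the budget it was charged against. */
static void slot_put(bool unaligned)
{
	sem_post(unaligned ? &cmd_slot_unal : &cmd_slot);
}

int main(void)
{
	slots_init();
	slot_get(true);		/* unaligned write reserves from the small pool */
	slot_put(true);		/* completion returns it to the same pool       */
	slot_get(false);	/* aligned IO draws from the main pool          */
	slot_put(false);
	printf("slot accounting ok\n");
	return 0;
}
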
			dd->mmio + HOST_HSORG);
}
+static int mtip_device_unaligned_constrained(struct driver_data *dd)
+{
+	return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
+}
+
/*
 * Detect the details of the product, and store anything needed
 * into the driver data structure. This includes product type and
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
		dd->work[i].port = dd->port;
+	/* Enable unaligned IO constraints for some devices */
+	if (mtip_device_unaligned_constrained(dd))
+		dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
+	else
+		dd->unal_qdepth = 0;
+
	/* Counting semaphore to track command slot usage */
-	sema_init(&dd->port->cmd_slot, num_command_slots - 1);
+	sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth);
+	sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
	/* Spinlock to prevent concurrent issue */
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
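
To make the semaphore split concrete, here is the arithmetic under assumed numbers (32 command slots, which is typical for this hardware, and the MTIP_MAX_UNALIGNED_SLOTS value of 8 introduced by this patch; the 32 is an assumption for illustration, not taken from this hunk):

/*
 * Assumed: num_command_slots == 32 and the device is constrained
 * (unal_qdepth == MTIP_MAX_UNALIGNED_SLOTS == 8).
 *
 *   cmd_slot      = 32 - 1 - 8 = 23  slots for aligned IO
 *                   (the "- 1" carries over from the original code,
 *                    which already held one slot out of the data-IO budget)
 *   cmd_slot_unal =              8  slots for small unaligned writes
 *
 * On an unconstrained device unal_qdepth is 0, so cmd_slot keeps its
 * original 31 slots and cmd_slot_unal, initialized to 0, is never taken
 * because no bio is ever classified as unaligned.
 */
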
	struct scatterlist *sg;
	struct bio_vec *bvec;
	int nents = 0;
-	int tag = 0;
+	int tag = 0, unaligned = 0;
	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
			return;
	}
-	sg = mtip_hw_get_scatterlist(dd, &tag);
+	if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
+						dd->unal_qdepth) {
+		if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+			unaligned = 1;
+		else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
+			unaligned = 1;
+	}
+
+	sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
	if (likely(sg != NULL)) {
		blk_queue_bounce(queue, &bio);
			dev_warn(&dd->pdev->dev,
				"Maximum number of SGL entries exceeded\n");
			bio_io_error(bio);
-			mtip_hw_release_scatterlist(dd, tag);
+			mtip_hw_release_scatterlist(dd, tag, unaligned);
			return;
		}
				tag,
				bio_endio,
				bio,
-				bio_data_dir(bio));
+				bio_data_dir(bio),
+				unaligned);
	} else
		bio_io_error(bio);
}
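
The classification above works in 512-byte sectors: eight sectors span one 4 KiB page, so on a constrained device (unal_qdepth non-zero) a write of at most 64 sectors (32 KiB) is treated as unaligned when either its starting sector or its length is not a multiple of eight. A standalone sketch of the same predicate follows; the function name, parameter names, and test values are illustrative and not taken from the driver.

#include <stdbool.h>
#include <stdio.h>

#define SECTORS_PER_4K	8	/* 8 x 512-byte sectors == 4 KiB */

/* Hypothetical helper mirroring the bio checks in the hunk above. */
static bool is_unaligned_write(unsigned long long start_sector,
			       unsigned int nr_sectors,
			       bool is_write, int unal_qdepth)
{
	/* only small writes on constrained devices are classified */
	if (!is_write || nr_sectors > 64 || !unal_qdepth)
		return false;

	/* start not on a 4 KiB boundary, or length not a 4 KiB multiple */
	return (start_sector % SECTORS_PER_4K) != 0 ||
	       (nr_sectors % SECTORS_PER_4K) != 0;
}

int main(void)
{
	/* 4 KiB-aligned, 8-sector write: aligned */
	printf("%d\n", is_unaligned_write(8, 8, true, 8));	/* prints 0 */
	/* write starting mid-page: unaligned */
	printf("%d\n", is_unaligned_write(9, 8, true, 8));	/* prints 1 */
	/* aligned start but 3-sector length: unaligned */
	printf("%d\n", is_unaligned_write(16, 3, true, 8));	/* prints 1 */
	return 0;
}
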
#define MTIP_FTL_REBUILD_MAGIC		0xED51
#define MTIP_FTL_REBUILD_TIMEOUT_MS	2400000
+/* unaligned IO handling */
+#define MTIP_MAX_UNALIGNED_SLOTS	8
+
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag)	(tag & 0x1F)
	int scatter_ents; /* Number of scatter list entries used */
+	int unaligned; /* command is unaligned on 4k boundary */
+
	struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
	int retries; /* The number of retries left for this command. */
	 * command slots available.
	 */
	struct semaphore cmd_slot;
+
+	/* Semaphore to control queue depth of unaligned IOs */
+	struct semaphore cmd_slot_unal;
+
	/* Spinlock for working around command-issue bug. */
	spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
};
	atomic_t irq_workers_active;
	int isr_binding;
+
+	int unal_qdepth; /* qdepth of unaligned IO queue */
};
#endif