#define IO_TIMEOUT 30
#define MAX_PRP_POOL 512
-enum nvme_queue_id {
- NVME_ADMIN_Q,
- NVME_IO_Q,
- NVME_Q_NUM,
-};
-
-/*
- * An NVM Express queue. Each device has at least two (one for admin
- * commands and one for I/O commands).
- */
-struct nvme_queue {
- struct nvme_dev *dev;
- struct nvme_command *sq_cmds;
- struct nvme_completion *cqes;
- wait_queue_head_t sq_full;
- u32 __iomem *q_db;
- u16 q_depth;
- s16 cq_vector;
- u16 sq_head;
- u16 sq_tail;
- u16 cq_head;
- u16 qid;
- u8 cq_phase;
- u8 cqe_seen;
- unsigned long cmdid_data[];
-};
-
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
u32 bit = enabled ? NVME_CSTS_RDY : 0;
*/
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
+ struct nvme_ops *ops;
u16 tail = nvmeq->sq_tail;
memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
(ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
+ ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
+ if (ops && ops->submit_cmd) {
+ ops->submit_cmd(nvmeq, cmd);
+ return;
+ }
+
if (++tail == nvmeq->q_depth)
tail = 0;
writel(tail, nvmeq->q_db);
struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
+ struct nvme_ops *ops;
u16 head = nvmeq->cq_head;
u16 phase = nvmeq->cq_phase;
u16 status;
return -ETIMEDOUT;
}
+ ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
+ if (ops && ops->complete_cmd)
+ ops->complete_cmd(nvmeq, cmd);
+
status >>= 1;
if (status) {
printf("ERROR: status = %x, phase = %d, head = %d\n",
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
int qid, int depth)
{
+ struct nvme_ops *ops;
struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
if (!nvmeq)
return NULL;
dev->queue_count++;
dev->queues[qid] = nvmeq;
+ ops = (struct nvme_ops *)dev->udev->driver->ops;
+ if (ops && ops->setup_queue)
+ ops->setup_queue(nvmeq);
+
return nvmeq;
free_queue:
struct nvme_id_ns *id;
int ret;
+ ndev->udev = udev;
INIT_LIST_HEAD(&ndev->namespaces);
if (readl(&ndev->bar->csts) == -1) {
ret = -ENODEV;
/* Represents an NVM Express device. Each nvme_dev is a PCI function. */
struct nvme_dev {
+	struct udevice *udev;	/* DM device; udev->driver->ops yields struct nvme_ops */
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	u32 nn;
};
+/* Admin queue and a single I/O queue. */
+enum nvme_queue_id {
+	NVME_ADMIN_Q,	/* qid of the admin command queue */
+	NVME_IO_Q,	/* qid of the single I/O command queue */
+	NVME_Q_NUM,	/* number of queues per controller */
+};
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ *
+ * NOTE(review): the old private definition also had a
+ * wait_queue_head_t sq_full field that is dropped here — confirm it
+ * was unused before this move to the header.
+ */
+struct nvme_queue {
+	struct nvme_dev *dev;		/* owning controller */
+	struct nvme_command *sq_cmds;	/* submission queue entries */
+	struct nvme_completion *cqes;	/* completion queue entries */
+	u32 __iomem *q_db;		/* doorbell register (tail is written here) */
+	u16 q_depth;			/* ring size; sq_tail wraps at this value */
+	s16 cq_vector;
+	u16 sq_head;
+	u16 sq_tail;			/* next free submission queue slot */
+	u16 cq_head;			/* next completion entry to inspect */
+	u16 qid;			/* queue id (see enum nvme_queue_id) */
+	u8 cq_phase;			/* expected phase tag of next completion */
+	u8 cqe_seen;
+	unsigned long cmdid_data[];
+};
+
/*
* An NVM Express namespace is equivalent to a SCSI LUN.
* Each namespace is operated as an independent "device".
u8 flbas;
};
+/**
+ * struct nvme_ops - controller-specific hooks, reached through
+ * dev->udev->driver->ops. Every hook is optional: each call site
+ * checks the ops pointer and the member for NULL before calling.
+ */
+struct nvme_ops {
+	/**
+	 * setup_queue - Controller-specific NVM Express queue setup.
+	 *
+	 * Called from nvme_alloc_queue() after the queue is registered
+	 * with the device.
+	 *
+	 * @nvmeq: NVM Express queue
+	 * Return: 0 if OK, -ve on error
+	 * NOTE(review): the visible caller ignores this return value —
+	 * confirm whether a failure should unwind queue allocation.
+	 */
+	int (*setup_queue)(struct nvme_queue *nvmeq);
+	/**
+	 * submit_cmd - Controller-specific NVM Express command submission.
+	 *
+	 * If this function pointer is set to NULL, normal command
+	 * submission is performed according to the NVM Express spec.
+	 * When set, the hook fully replaces the default path: it must
+	 * advance the submission queue tail and ring the doorbell
+	 * itself, since nvme_submit_cmd() returns immediately after
+	 * calling it. The command has already been copied into the
+	 * submission queue (and the cache flushed) when the hook runs.
+	 *
+	 * @nvmeq: NVM Express queue
+	 * @cmd: NVM Express command
+	 */
+	void (*submit_cmd)(struct nvme_queue *nvmeq, struct nvme_command *cmd);
+	/**
+	 * complete_cmd - Controller-specific NVM Express command completion.
+	 *
+	 * Called once a completion entry has been received, before the
+	 * status field is decoded.
+	 *
+	 * @nvmeq: NVM Express queue
+	 * @cmd: NVM Express command
+	 */
+	void (*complete_cmd)(struct nvme_queue *nvmeq, struct nvme_command *cmd);
+};
+
+
int nvme_init(struct udevice *udev);
#endif /* __DRIVER_NVME_H__ */