BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}
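The BUILD_BUG_ON checks pin the on-wire structure sizes at compile time: NVMe defines the Identify Controller and Identify Namespace data structures as 4096 bytes, an LBA Range Type entry as 64 bytes, and the SMART / Health Information log page as 512 bytes, so a stray field edit breaks the build instead of the protocol. A minimal userspace analogue of the same guard, using C11 _Static_assert in place of the kernel's BUILD_BUG_ON (the structure here is hypothetical):

#include <stdint.h>

/* Hypothetical wire structure that a spec fixes at exactly 512 bytes. */
struct wire_log_page {
	uint8_t critical_warning;
	uint8_t temperature[2];
	uint8_t rsvd3[509];
};

/* Same idea as BUILD_BUG_ON(sizeof(...) != 512): refuse to compile if
 * padding or a field change alters the on-wire size. */
_Static_assert(sizeof(struct wire_log_page) == 512,
	       "wire_log_page must be exactly 512 bytes");

int main(void)
{
	return 0;
}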
typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
struct nvme_completion *);
*fn = special_completion;
return CMD_CTX_INVALID;
}
- *fn = info[cmdid].fn;
+ if (fn)
+ *fn = info[cmdid].fn;
ctx = info[cmdid].ctx;
info[cmdid].fn = special_completion;
info[cmdid].ctx = CMD_CTX_COMPLETED;
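Callers that only want to reclaim the slot can now pass a NULL fn and skip the callback handoff; the new error path in the bio submission code below does exactly that. Note that the out-of-range branch above still writes through fn unconditionally, so NULL is only safe for command IDs the caller knows are in range, which holds for that new caller. A userspace sketch of the optional out-parameter pattern, with hypothetical names:

#include <stddef.h>

typedef void (*done_fn)(void *ctx);

struct slot {
	done_fn fn;
	void *ctx;
};

/* Return the slot's context and, only when the caller asked for it,
 * its completion callback; fn == NULL means "discard the callback". */
void *slot_free(struct slot *table, int id, done_fn *fn)
{
	void *ctx = table[id].ctx;

	if (fn)
		*fn = table[id].fn;
	table[id].fn = NULL;
	table[id].ctx = NULL;
	return ctx;
}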
iod->offset = offsetof(struct nvme_iod, sg[nseg]);
iod->npages = -1;
iod->length = nbytes;
+ iod->nents = 0;
}
return iod;
struct bio *bio = iod->private;
u16 status = le16_to_cpup(&cqe->status) >> 1;
- dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
- bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (iod->nents)
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_free_iod(dev, iod);
if (status) {
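Taken together, the last two hunks establish an invariant: iod->nents stays zero from allocation until nvme_map_bio succeeds, so bio_completion can tell a mapped request from one that failed before mapping and must not be passed to dma_unmap_sg. A userspace sketch of the same lifetime rule, with hypothetical names:

#include <stdlib.h>

/* nents == 0 means "never mapped", the state the patch makes explicit
 * at allocation time. */
struct io_desc {
	int nents;
};

static void unmap_pages(int nents)
{
	(void)nents;	/* stands in for dma_unmap_sg() */
}

struct io_desc *io_desc_alloc(void)
{
	struct io_desc *d = malloc(sizeof(*d));

	if (d)
		d->nents = 0;	/* same init the patch adds to the iod */
	return d;
}

void io_desc_complete(struct io_desc *d)
{
	if (d->nents)		/* only undo a mapping that was made */
		unmap_pages(d->nents);
	free(d);
}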
result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
if (result < 0)
- goto free_iod;
+ goto free_cmdid;
length = result;
cmnd->rw.command_id = cmdid;
return 0;
+ free_cmdid:
+ free_cmdid(nvmeq, cmdid, NULL);
free_iod:
nvme_free_iod(nvmeq->dev, iod);
nomem:
return result;
}
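The new free_cmdid label completes the driver's unwind ladder: resources are released in reverse order of acquisition, and a failure in nvme_map_bio now frees the command ID it had already allocated instead of leaking it by jumping straight to free_iod. A self-contained sketch of the idiom, with hypothetical names and malloc() standing in for the allocators:

#include <errno.h>
#include <stdlib.h>

int submit(int fail_map)
{
	int result = -ENOMEM;
	void *iod, *cmdid;

	iod = malloc(64);
	if (!iod)
		goto nomem;

	cmdid = malloc(16);
	if (!cmdid)
		goto free_iod;

	result = fail_map ? -ENOMEM : 0;
	if (result < 0)
		goto free_cmdid;	/* releases cmdid, then iod */

	free(cmdid);			/* in the driver, ownership moves */
	free(iod);			/* to the completion path instead */
	return 0;

 free_cmdid:
	free(cmdid);
 free_iod:
	free(iod);
 nomem:
	return result;
}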
return nvme_submit_admin_cmd(dev, &c, NULL);
}
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
- unsigned nsid, dma_addr_t dma_addr)
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+ dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;

memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
c.features.nsid = cpu_to_le32(nsid);
c.features.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- return nvme_submit_admin_cmd(dev, &c, NULL);
+ return nvme_submit_admin_cmd(dev, &c, result);
}
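nvme_get_features() now hands back dword 0 of the completion through the u32 *result out-parameter that nvme_submit_admin_cmd() already accepts; callers that only need the status pass NULL. A hedged caller fragment, not part of the patch, with an illustrative feature choice:

u32 result;
int status;

status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, 0, 0, &result);
if (!status)
	dev_info(&dev->pci_dev->dev, "queue count dword0: %#x\n", result);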
static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
unsigned dword11, dma_addr_t dma_addr, u32 *result)
spin_lock_irq(&nvmeq->q_lock);
nvme_cancel_ios(nvmeq, false);
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ bio_endio(bio, -EIO);
+ }
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
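Without this drain, bios parked on sq_cong when a queue goes away would never complete and their submitters would hang; ending each one with -EIO under the queue lock bounds the teardown. A userspace analogue of failing out a pending list on teardown, with hypothetical names:

#include <errno.h>
#include <stddef.h>

struct req {
	struct req *next;
	void (*end)(struct req *, int error);
};

struct req_list {
	struct req *head;
};

static struct req *req_list_pop(struct req_list *l)
{
	struct req *r = l->head;

	if (r)
		l->head = r->next;
	return r;
}

/* Mirrors the sq_cong loop: pop every parked request and fail it
 * instead of leaking it. */
void drain_on_teardown(struct req_list *l)
{
	struct req *r;

	while ((r = req_list_pop(l)) != NULL)
		r->end(r, -EIO);
}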
if (length != cmd.data_len)
status = -ENOMEM;
else
- status = nvme_submit_admin_cmd(dev, &c, NULL);
+ status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
nvme_free_iod(dev, iod);
}
+
+ if (!status && copy_to_user(&ucmd->result, &cmd.result,
+ sizeof(cmd.result)))
+ status = -EFAULT;
+
return status;
}
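The admin passthrough ioctl now copies the command's 32-bit completion result back into the caller's struct, and only when the command itself succeeded; a failing copy_to_user() turns into -EFAULT. From userspace the change looks like this hedged sketch, assuming the passthrough definitions from <linux/nvme.h> of this era (the device path and feature ID are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>	/* struct nvme_admin_cmd, NVME_IOCTL_ADMIN_CMD */

int main(void)
{
	struct nvme_admin_cmd cmd;
	int fd = open("/dev/nvme0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x0a;	/* Get Features */
	cmd.cdw10 = 0x07;	/* Number of Queues */

	/* On success the driver now fills cmd.result with dword 0. */
	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) == 0)
		printf("completion dword0: %#x\n", cmd.result);

	close(fd);
	return 0;
}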
continue;
res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
- dma_addr + 4096);
+ dma_addr + 4096, NULL);
if (res)
- continue;
+ memset(mem + 4096, 0, 4096);
ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
if (ns)
list_add_tail(&ns->list, &dev->namespaces);
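NVME_FEAT_LBA_RANGE is optional, so a controller may legitimately fail this Get Features command; zeroing the buffer instead of skipping the namespace lets nvme_alloc_ns() see an empty range table and still register the device. The same fallback in isolation, with hypothetical names:

#include <stddef.h>
#include <string.h>

/* A failed optional query degrades to an all-zero answer instead of
 * aborting discovery. */
int query_optional_feature(void *buf, size_t len,
			   int (*query)(void *, size_t))
{
	if (query(buf, len) != 0)
		memset(buf, 0, len);	/* "unsupported" reads as "empty" */
	return 0;			/* the caller proceeds either way */
}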
NVME_LBAF_RP_DEGRADED = 3,
};
+struct nvme_smart_log {
+ __u8 critical_warning;
+ __u8 temperature[2];
+ __u8 avail_spare;
+ __u8 spare_thresh;
+ __u8 percent_used;
+ __u8 rsvd6[26];
+ __u8 data_units_read[16];
+ __u8 data_units_written[16];
+ __u8 host_reads[16];
+ __u8 host_writes[16];
+ __u8 ctrl_busy_time[16];
+ __u8 power_cycles[16];
+ __u8 power_on_hours[16];
+ __u8 unsafe_shutdowns[16];
+ __u8 media_errors[16];
+ __u8 num_err_log_entries[16];
+ __u8 rsvd192[320];
+};
+
+enum {
+ NVME_SMART_CRIT_SPARE = 1 << 0,
+ NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
+ NVME_SMART_CRIT_RELIABILITY = 1 << 2,
+ NVME_SMART_CRIT_MEDIA = 1 << 3,
+ NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
+};
+
struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
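The 16-byte counters in struct nvme_smart_log are 128-bit little-endian integers in the spec, which is why they are declared as byte arrays rather than integer fields, and the composite temperature is a 16-bit little-endian value in Kelvin. A hedged userspace decode sketch using the structure and warning bits added above; le128_low64() is a hypothetical helper that folds only the low 8 bytes, plenty for realistic counter values:

#include <stdint.h>
#include <stdio.h>
#include <linux/nvme.h>	/* struct nvme_smart_log, NVME_SMART_CRIT_* */

/* Fold the low 8 bytes of a 128-bit little-endian counter. */
static uint64_t le128_low64(const __u8 b[16])
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];
	return v;
}

void smart_report(const struct nvme_smart_log *log)
{
	/* Composite temperature: 16-bit little-endian, in Kelvin. */
	unsigned temp_k = log->temperature[0] | (log->temperature[1] << 8);

	if (log->critical_warning & NVME_SMART_CRIT_SPARE)
		puts("available spare below threshold");
	if (log->critical_warning & NVME_SMART_CRIT_TEMPERATURE)
		puts("temperature outside thresholds");

	printf("temp %u K, %llu data units read, %u%% used\n", temp_k,
	       (unsigned long long)le128_low64(log->data_units_read),
	       log->percent_used);
}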