scsi: target: Add block PR support to iblock
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 3c462d6..3d1b511 100644
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/pr.h>
 #include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
 #include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 
 #include "target_core_iblock.h"
+#include "target_core_pr.h"
 
 #define IBLOCK_MAX_BIO_PER_TASK         32     /* max # of bios to submit at a time */
 #define IBLOCK_BIO_POOL_SIZE   128
@@ -309,7 +312,7 @@ static sector_t iblock_get_blocks(struct se_device *dev)
        return blocks_long;
 }
 
-static void iblock_complete_cmd(struct se_cmd *cmd)
+static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
 {
        struct iblock_req *ibr = cmd->priv;
        u8 status;
@@ -317,7 +320,9 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
        if (!refcount_dec_and_test(&ibr->pending))
                return;
 
-       if (atomic_read(&ibr->ib_bio_err_cnt))
+       if (blk_status == BLK_STS_RESV_CONFLICT)
+               status = SAM_STAT_RESERVATION_CONFLICT;
+       else if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;
@@ -330,6 +335,7 @@ static void iblock_bio_done(struct bio *bio)
 {
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;
+       blk_status_t blk_status = bio->bi_status;
 
        if (bio->bi_status) {
                pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
@@ -342,7 +348,7 @@ static void iblock_bio_done(struct bio *bio)
 
        bio_put(bio);
 
-       iblock_complete_cmd(cmd);
+       iblock_complete_cmd(cmd, blk_status);
 }
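
Two details worth noting in iblock_bio_done() above: bio->bi_status is saved
into a local before bio_put(), since dropping the last reference may free the
bio, and forwarding that status is what lets iblock_complete_cmd() turn
BLK_STS_RESV_CONFLICT from the underlying block driver into
SAM_STAT_RESERVATION_CONFLICT for the initiator.
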
 
 static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
@@ -758,7 +764,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
        if (!sgl_nents) {
                refcount_set(&ibr->pending, 1);
-               iblock_complete_cmd(cmd);
+               iblock_complete_cmd(cmd, BLK_STS_OK);
                return 0;
        }
 
@@ -816,7 +822,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        }
 
        iblock_submit_bios(&list);
-       iblock_complete_cmd(cmd);
+       iblock_complete_cmd(cmd, BLK_STS_OK);
        return 0;
 
 fail_put_bios:
@@ -828,6 +834,258 @@ fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
+static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
+                                           u64 sa_key, u8 type, bool aptpl)
+{
+       struct se_device *dev = cmd->se_dev;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct block_device *bdev = ib_dev->ibd_bd;
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       int ret;
+
+       if (!ops) {
+               pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
+       switch (sa) {
+       case PRO_REGISTER:
+       case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+               if (!ops->pr_register) {
+                       pr_err("block device does not support pr_register.\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
+
+               /* The block layer pr ops always enables aptpl */
+               if (!aptpl)
+                       pr_info("APTPL not set by initiator, but will be used.\n");
+
+               ret = ops->pr_register(bdev, key, sa_key,
+                               sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
+               break;
+       case PRO_RESERVE:
+               if (!ops->pr_reserve) {
+                       pr_err("block_device does not support pr_reserve.\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
+
+               ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
+               break;
+       case PRO_CLEAR:
+               if (!ops->pr_clear) {
+                       pr_err("block_device does not support pr_clear.\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
+
+               ret = ops->pr_clear(bdev, key);
+               break;
+       case PRO_PREEMPT:
+       case PRO_PREEMPT_AND_ABORT:
+               if (!ops->pr_preempt) {
+                       pr_err("block_device does not support pr_preempt.\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
+
+               ret = ops->pr_preempt(bdev, key, sa_key,
+                                     scsi_pr_type_to_block(type),
+                                     sa == PRO_PREEMPT_AND_ABORT);
+               break;
+       case PRO_RELEASE:
+               if (!ops->pr_release) {
+                       pr_err("block_device does not support pr_release.\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
+
+               ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
+               break;
+       default:
+               pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
+       if (!ret)
+               return TCM_NO_SENSE;
+       else if (ret == PR_STS_RESERVATION_CONFLICT)
+               return TCM_RESERVATION_CONFLICT;
+       else
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
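
For reference, the block layer callbacks dispatched to above are declared in
include/linux/pr.h. An abbreviated sketch follows (signatures from memory of
this series, so treat them as approximate and check the tree for the
authoritative version):

    struct pr_ops {
            int (*pr_register)(struct block_device *bdev, u64 old_key,
                               u64 new_key, u32 flags);
            int (*pr_reserve)(struct block_device *bdev, u64 key,
                              enum pr_type type, u32 flags);
            int (*pr_release)(struct block_device *bdev, u64 key,
                              enum pr_type type);
            int (*pr_preempt)(struct block_device *bdev, u64 old_key,
                              u64 new_key, enum pr_type type, bool abort);
            int (*pr_clear)(struct block_device *bdev, u64 key);
            int (*pr_read_keys)(struct block_device *bdev,
                                struct pr_keys *keys_info);
            int (*pr_read_reservation)(struct block_device *bdev,
                                       struct pr_held_reservation *rsv);
    };
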
+
+static void iblock_pr_report_caps(unsigned char *param_data)
+{
+       u16 len = 8;
+
+       put_unaligned_be16(len, &param_data[0]);
+       /*
+        * When using the pr_ops passthrough method we only support exporting
+        * the device through one target port because from the backend module
+        * level we can't see the target port config. As a result we only
+        * support registration directly from the I_T nexus the cmd is sent
+        * through and do not set ATP_C here.
+        *
+        * The block layer pr_ops do not support passing in initiators so
+        * we don't set SIP_C here.
+        */
+       /* PTPL_C: Persistence across Target Power Loss bit */
+       param_data[2] |= 0x01;
+       /*
+        * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+        * set the TMV: Task Mask Valid bit.
+        */
+       param_data[3] |= 0x80;
+       /*
+        * ALLOW COMMANDS: report 001b for now; per Table 166 this may later
+        * be raised to 010b (0x20) or 100b (0x40).
+        */
+       param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+       /*
+        * PTPL_A: Persistence across Target Power Loss Active bit. The block
+        * layer pr ops always enables this so report it active.
+        */
+       param_data[3] |= 0x01;
+       /*
+        * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
+        */
+       param_data[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */
+       param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+       param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+       param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+       param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+       param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+}
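
Assembled, the eight-byte REPORT CAPABILITIES payload built above comes out
as follows (a worked example, not part of the patch):

    byte 0..1  00 08   AVAILABLE LENGTH (8)
    byte 2     01      PTPL_C
    byte 3     91      TMV (0x80) | ALLOW COMMANDs 001b (0x10) | PTPL_A (0x01)
    byte 4     ea      0x80 | 0x40 | 0x20 | 0x08 | 0x02 from the type mask
    byte 5     01      PR_TYPE_EXCLUSIVE_ACCESS_ALLREG
    byte 6..7  00 00   reserved
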
+
+static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
+                                         unsigned char *param_data)
+{
+       struct se_device *dev = cmd->se_dev;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct block_device *bdev = ib_dev->ibd_bd;
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       int i, len, paths, data_offset;
+       struct pr_keys *keys;
+       sense_reason_t ret;
+
+       if (!ops) {
+               pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
+       if (!ops->pr_read_keys) {
+               pr_err("Block device does not support read_keys.\n");
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
+       /*
+        * We don't know what's under us, but dm-multipath will register every
+        * path with the same key, so start off with enough space for 16 paths,
+        * which is not a lot of memory and should normally be enough.
+        */
+       paths = 16;
+retry:
+       len = 8 * paths;
+       keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
+       if (!keys)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       keys->num_keys = paths;
+       if (!ops->pr_read_keys(bdev, keys)) {
+               if (keys->num_keys > paths) {
+                       kfree(keys);
+                       paths *= 2;
+                       goto retry;
+               }
+       } else {
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               goto free_keys;
+       }
+
+       ret = TCM_NO_SENSE;
+
+       put_unaligned_be32(keys->generation, &param_data[0]);
+       if (!keys->num_keys) {
+               put_unaligned_be32(0, &param_data[4]);
+               goto free_keys;
+       }
+
+       put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
+
+       data_offset = 8;
+       for (i = 0; i < keys->num_keys; i++) {
+               if (data_offset + 8 > cmd->data_length)
+                       break;
+
+               put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
+               data_offset += 8;
+       }
+
+free_keys:
+       kfree(keys);
+       return ret;
+}
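
The grow-and-retry loop works because pr_read_keys() is expected to update
num_keys to the number of keys actually registered even when that exceeds the
capacity passed in. The structure, abbreviated from include/linux/pr.h (check
the tree for the authoritative layout):

    struct pr_keys {
            u32     generation;
            u32     num_keys;
            u64     keys[];
    };
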
+
+static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
+                                                unsigned char *param_data)
+{
+       struct se_device *dev = cmd->se_dev;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct block_device *bdev = ib_dev->ibd_bd;
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       struct pr_held_reservation rsv = { };
+
+       if (!ops) {
+               pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
+       if (!ops->pr_read_reservation) {
+               pr_err("Block device does not support read_keys.\n");
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
+       if (ops->pr_read_reservation(bdev, &rsv))
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       put_unaligned_be32(rsv.generation, &param_data[0]);
+       if (!block_pr_type_to_scsi(rsv.type)) {
+               put_unaligned_be32(0, &param_data[4]);
+               return TCM_NO_SENSE;
+       }
+
+       put_unaligned_be32(16, &param_data[4]);
+
+       if (cmd->data_length < 16)
+               return TCM_NO_SENSE;
+       put_unaligned_be64(rsv.key, &param_data[8]);
+
+       if (cmd->data_length < 22)
+               return TCM_NO_SENSE;
+       param_data[21] = block_pr_type_to_scsi(rsv.type);
+
+       return TCM_NO_SENSE;
+}
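
Likewise the reservation snapshot filled in by pr_read_reservation(),
abbreviated from include/linux/pr.h (again, field order from memory; check
the tree):

    struct pr_held_reservation {
            u64             key;
            u32             generation;
            enum pr_type    type;
    };
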
+
+static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
+                                          unsigned char *param_data)
+{
+       sense_reason_t ret = TCM_NO_SENSE;
+
+       switch (sa) {
+       case PRI_REPORT_CAPABILITIES:
+               iblock_pr_report_caps(param_data);
+               break;
+       case PRI_READ_KEYS:
+               ret = iblock_pr_read_keys(cmd, param_data);
+               break;
+       case PRI_READ_RESERVATION:
+               ret = iblock_pr_read_reservation(cmd, param_data);
+               break;
+       default:
+               pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+
+       return ret;
+}
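
The PRI_* service action codes matched above follow the SPC encoding and are
defined in drivers/target/target_core_pr.h (included at the top of this
patch). Note that PRI_READ_FULL_STATUS falls through to the
unsupported-opcode path:

    #define PRI_READ_KEYS                   0x00
    #define PRI_READ_RESERVATION            0x01
    #define PRI_REPORT_CAPABILITIES         0x02
    #define PRI_READ_FULL_STATUS            0x03
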
+
 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
 {
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -868,17 +1126,19 @@ static unsigned int iblock_get_io_opt(struct se_device *dev)
        return bdev_io_opt(bd);
 }
 
-static struct sbc_ops iblock_sbc_ops = {
+static struct exec_cmd_ops iblock_exec_cmd_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
        .execute_unmap          = iblock_execute_unmap,
+       .execute_pr_out         = iblock_execute_pr_out,
+       .execute_pr_in          = iblock_execute_pr_in,
 };
 
 static sense_reason_t
 iblock_parse_cdb(struct se_cmd *cmd)
 {
-       return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+       return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
 }
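
With sbc_parse_cdb() handed the renamed exec_cmd_ops, and with
TRANSPORT_FLAG_PASSTHROUGH_PGR advertised as changeable rather than default
in the hunk below, PR passthrough is opt-in per device: once an admin enables
it, the core's reservation emulation in target_core_pr.c is bypassed and
PERSISTENT RESERVE IN/OUT CDBs are routed to the two new execute_pr_*
callbacks.
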
 
 static bool iblock_get_write_cache(struct se_device *dev)
@@ -889,6 +1149,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
 static const struct target_backend_ops iblock_ops = {
        .name                   = "iblock",
        .inquiry_prod           = "IBLOCK",
+       .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
        .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
        .attach_hba             = iblock_attach_hba,