/* drivers/scsi/scsi_debug.c (Linux kernel) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44
45 #include <net/checksum.h>
46
47 #include <asm/unaligned.h>
48
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57
58 #include "sd.h"
59 #include "scsi_logging.h"
60
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"   /* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64
65 #define MY_NAME "scsi_debug"
66
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW     0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB    128
161 #define DEF_ZBC_MAX_OPEN_ZONES  8
162 #define DEF_ZBC_NR_CONV_ZONES   1
163
164 #define SDEBUG_LUN_0_VAL 0
165
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE                1
168 #define SDEBUG_OPT_MEDIUM_ERR           2
169 #define SDEBUG_OPT_TIMEOUT              4
170 #define SDEBUG_OPT_RECOVERED_ERR        8
171 #define SDEBUG_OPT_TRANSPORT_ERR        16
172 #define SDEBUG_OPT_DIF_ERR              32
173 #define SDEBUG_OPT_DIX_ERR              64
174 #define SDEBUG_OPT_MAC_TIMEOUT          128
175 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
176 #define SDEBUG_OPT_Q_NOISE              0x200
177 #define SDEBUG_OPT_ALL_TSF              0x400   /* ignore */
178 #define SDEBUG_OPT_RARE_TSF             0x800
179 #define SDEBUG_OPT_N_WCE                0x1000
180 #define SDEBUG_OPT_RESET_NOISE          0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
182 #define SDEBUG_OPT_HOST_BUSY            0x8000
183 #define SDEBUG_OPT_CMD_ABORT            0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185                               SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187                                   SDEBUG_OPT_TRANSPORT_ERR | \
188                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189                                   SDEBUG_OPT_SHORT_TRANSFER | \
190                                   SDEBUG_OPT_HOST_BUSY | \
191                                   SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1     /* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6   /* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
208
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210  * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
224
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN                  1       /* Data-in command (e.g. READ) */
227 #define F_D_OUT                 2       /* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
229 #define F_D_UNKN                8
230 #define F_RL_WLUN_OK            0x10    /* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA               0x20    /* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR           0x40    /* for commands like INQUIRY */
233 #define F_SA_LOW                0x80    /* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH               0x100   /* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP                0x200   /* invalid opcode (not supported) */
236 #define F_FAKE_RW               0x400   /* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS              0x800   /* media access, reacts to SSU state */
238 #define F_SSU_DELAY             0x1000  /* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY            0x2000  /* SYNCHRONIZE CACHE delay */
240
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
246
247 #define SDEBUG_MAX_PARTS 4
248
249 #define SDEBUG_MAX_CMD_LEN 32
250
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252
253 static struct kmem_cache *queued_cmd_cache;
254
255 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
256 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
257
258 /* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,	/* conventional (non write-pointer) zone */
	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
	/* ZBC_ZTYPE_SOBR = 0x4, */	/* not implemented */
	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone */
};
266
267 /* enumeration names taken from table 26, zbcr05 */
/* Zone conditions. Note: the numeric values for ZC5/ZC6 do not follow the
 * name order (ZC6_READ_ONLY = 0xd, ZC5_FULL = 0xe) — that matches the spec,
 * do not "fix" it. */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,	/* e.g. a conventional zone */
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
278
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;	/* "non sequential write resources active" attribute */
	unsigned int z_size;		/* zone size in logical blocks */
	sector_t z_start;		/* first LBA of the zone */
	sector_t z_wp;			/* write pointer LBA */
};
287
/*
 * Per logical-unit state of a simulated device, owned by an
 * sdebug_host_info and addressed by (channel, target, lun).
 */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry on owner's dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;		/* logical unit name; presumably reported when
				 * uuid_ctl is set — confirm against VPD code */
	struct sdebug_host_info *sdbg_host;	/* back-pointer to owning host */
	unsigned long uas_bm[1];	/* pending Unit Attentions, one bit per
					 * SDEBUG_UA_* (SDEBUG_NUM_UAS bits) */
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;	/* usable blocks per zone; assumed <= zsize — confirm */
	unsigned int zsize;	/* zone size in logical blocks */
	unsigned int zsize_shift;	/* presumably log2(zsize) — confirm */
	unsigned int nr_zones;
	unsigned int nr_conv_zones;	/* conventional zones (see DEF_ZBC_NR_CONV_ZONES) */
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;	/* zones currently in ZC2_IMPLICIT_OPEN */
	unsigned int nr_exp_open;	/* zones currently in ZC3_EXPLICIT_OPEN */
	unsigned int nr_closed;		/* zones currently in ZC4_CLOSED */
	unsigned int max_open;		/* limit on concurrently open zones */
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;	/* per-zone state array */
};
314
/* One instance per simulated SCSI host adapter. */
struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;	/* embedded; recovered via dev_to_sdebug_host() */
	struct list_head dev_info_list;	/* the sdebug_dev_info LUs of this host */
};
322
323 /* There is an xarray of pointers to this struct's objects, one per host */
324 struct sdeb_store_info {
325         rwlock_t macc_lck;      /* for atomic media access on this store */
326         u8 *storep;             /* user data storage (ram) */
327         struct t10_pi_tuple *dif_storep; /* protection info */
328         void *map_storep;       /* provisioning map */
329 };
330
331 #define dev_to_sdebug_host(d)   \
332         container_of(d, struct sdebug_host_info, dev)
333
334 #define shost_to_sdebug_host(shost)     \
335         dev_to_sdebug_host(shost->dma_dev)
336
337 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
338                       SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
339
/* Bookkeeping for one deferred (delayed) command response. */
struct sdebug_defer {
	struct hrtimer hrt;	/* hrtimer-based deferral (SDEB_DEFER_HRT) */
	struct execute_work ew;	/* workqueue-based deferral (SDEB_DEFER_WQ) */
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;	/* which deferral mechanism is in use */
};
348
/*
 * State for one queued (in-flight) command.
 * NOTE(review): the comment below refers to in_use_bm[] in a struct
 * sdebug_queue that is not visible in this part of the file; it may be
 * stale now that allocation goes through queued_cmd_cache — confirm.
 */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;	/* deferred-completion bookkeeping */
	struct scsi_cmnd *scmd;		/* the command being responded to */
};
356
/* Per scsi_cmnd private data; the lock guards this command's state
 * (exact usage is outside this chunk). */
struct sdebug_scsi_cmd {
	spinlock_t   lock;
};
360
/* Driver-wide statistics, updated atomically from submission/completion paths */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending; /* an artificial error is to be injected
				      * (trigger logic not visible in this chunk) */
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
367
/*
 * Describes one supported SCSI command (opcode, or opcode + service
 * action): how its cdb is validated (len_mask) and which resp_*()
 * routine builds the response (pfp). Instances live in
 * opcode_info_arr[] and the *_iarr[] overflow arrays below.
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of the F_* / FF_* flags above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response fn, NULL if none */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
379
380 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Indexes into opcode_info_arr[]. Index 0 must remain the invalid-opcode
 * entry because unsupported opcodes map to 0 in opcode_ind_arr[] below. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
416
417
/*
 * Map each of the 256 possible cdb[0] opcodes onto an SDEB_I_* index.
 * A zero entry means the opcode is not supported (SDEB_I_INVALID_OPCODE).
 * Rows are groups of 32 opcodes; the leading comment on each row gives
 * the starting opcode value and typical cdb length for that range.
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
462
463 /*
464  * The following "response" functions return the SCSI mid-level's 4 byte
465  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
466  * command completion, they can mask their return value with
467  * SDEG_RES_IMMED_MASK .
468  */
469 #define SDEG_RES_IMMED_MASK 0x40000000
470
471 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500
501 static int sdebug_do_add_host(bool mk_new_store);
502 static int sdebug_add_host_helper(int per_host_idx);
503 static void sdebug_do_remove_host(bool the_end);
504 static int sdebug_add_store(void);
505 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
506 static void sdebug_erase_all_stores(bool apart_from_first);
507
508 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
509
510 /*
511  * The following are overflow arrays for cdbs that "hit" the same index in
512  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513  * should be placed in opcode_info_arr[], the others should be placed here.
514  */
/* Overflow for SDEB_I_MODE_SENSE: the 6-byte variant (opcode 0x1a);
 * MODE SENSE(10) is the preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
519
/* Overflow for SDEB_I_MODE_SELECT: the 6-byte variant (opcode 0x15);
 * MODE SELECT(10) is the preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
524
/* Overflow for SDEB_I_READ: READ(10), READ(6) and READ(12);
 * READ(16) is the preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
535
/* Overflow for SDEB_I_WRITE: WRITE(10), WRITE(6) and WRITE(12);
 * WRITE(16) is the preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
547
/* Overflow for SDEB_I_VERIFY: VERIFY(10);
 * VERIFY(16) is the preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};
553
/* Overflow for SDEB_I_SERV_ACT_IN_16: GET LBA STATUS (service action 0x12);
 * READ CAPACITY(16) is the preferred SA_IN(16) entry in opcode_info_arr[]. */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
559
/* Overflow for SDEB_I_VARIABLE_LEN (opcode 0x7f, SA in cdb bytes 8-9):
 * WRITE(32) and WRITE SCATTERED(32); READ(32) is the preferred entry. */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
568
/* Overflow for SDEB_I_MAINT_IN (opcode 0xa3, SA in cdb byte 1):
 * REPORT TARGET PORT GROUPS is the preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
577
/* Overflow for SDEB_I_WRITE_SAME: the 16-byte variant (opcode 0x93). */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
583
/* Overflow for SDEB_I_RESERVE: RESERVE(6); like RESERVE(10) it has no
 * response function (NULL pfp), so it is accepted but ignored. */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
588
/* Overflow for SDEB_I_RELEASE: RELEASE(6); like RELEASE(10) it has no
 * response function (NULL pfp), so it is accepted but ignored. */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
593
/* Overflow for SDEB_I_SYNC_CACHE: the 16-byte variant (opcode 0x91). */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
599
/* Overflow for SDEB_I_PRE_FETCH: the 16-byte variant (opcode 0x90). */
static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};
605
/* Overflow for SDEB_I_ZONE_OUT (opcode 0x94, SA in cdb byte 1):
 * CLOSE ZONE (sa 0x1), FINISH ZONE (sa 0x2), RESET WRITE POINTER (sa 0x4). */
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};
617
/* Overflow for SDEB_I_ZONE_IN (opcode 0x95, SA in cdb byte 1).
 * NOTE(review): this entry (sa 0x6) carries a NULL response function yet
 * is commented "REPORT ZONES" — confirm against opcode_info_arr[] which
 * service action this really is and whether NULL pfp is intended. */
static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
623
624
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626  * plus the terminating elements for logic that scans this table such as
627  * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
629 /* 0 */
630         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
631             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636              0, 0} },                                   /* REPORT LUNS */
637         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 /* 5 */
642         {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
643             resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
644                 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645         {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
646             resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
647                 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
649             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
650              0, 0, 0} },
651         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
652             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
653              0, 0} },
654         {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655             resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
656             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 /* 10 */
658         {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659             resp_write_dt0, write_iarr,                 /* WRITE(16) */
660                 {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661                  0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662         {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664         {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665             resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666                 {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667                  0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668         {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669             NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670             0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
671         {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672             resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
673                 maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674                                 0xff, 0, 0xc7, 0, 0, 0, 0} },
675 /* 15 */
676         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678         {ARRAY_SIZE(verify_iarr), 0x8f, 0,
679             F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,   /* VERIFY(16) */
680             verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681                           0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682         {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683             resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
684             {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
685              0xff, 0xff} },
686         {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687             NULL, reserve_iarr, /* RESERVE(10) <no response function> */
688             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689              0} },
690         {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691             NULL, release_iarr, /* RELEASE(10) <no response function> */
692             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
693              0} },
694 /* 20 */
695         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701         {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
702             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703         {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704             {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 /* 25 */
706         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708              0, 0, 0, 0} },                     /* WRITE_BUFFER */
709         {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710             resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
711                 {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
712                  0, 0, 0, 0, 0} },
713         {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714             resp_sync_cache, sync_cache_iarr,
715             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716              0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
717         {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719              0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
720         {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721             resp_pre_fetch, pre_fetch_iarr,
722             {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723              0, 0, 0, 0} },                     /* PRE-FETCH (10) */
724
725 /* 30 */
726         {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727             resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
728                 {16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729                  0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730         {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731             resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
732                 {16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
734 /* sentinel */
735         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
736             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
737 };
738
/*
 * Shadow copies of this driver's module parameters. Each starts at its
 * DEF_* compile-time default; most may be changed at module load time
 * and several again later via sysfs/writable module parameters.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning knobs (thin provisioning emulation) */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
798
/* SAM logical unit addressing methods (top bits of the first LUN byte) */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
/* plain-int view of sdebug_lun_am (enums can't be module parameters) */
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
805
/* Geometry of the simulated medium and bookkeeping for the RAM store(s) */
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all simulated hosts, protected by the mutex below */
static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

/* xarray of sdeb_store_info objects, one per backing RAM store */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
823
/* Event counters, mostly reported via the driver's proc/sysfs output */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/

/* protect the (up to two) RAM stores against concurrent access */
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

/* pseudo bus on which the simulated devices hang; defined later in file */
static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-built SCSI result words returned by the command simulators */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
869
870
871 /* Only do the extra work involved in logical block provisioning if one or
872  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
873  * real reads and writes (i.e. not skipping them for speed).
874  */
875 static inline bool scsi_debug_lbp(void)
876 {
877         return 0 == sdebug_fake_rw &&
878                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
879 }
880
/*
 * Map @lba (taken modulo the store size) to the byte address of its
 * sector inside the RAM store @sip. If @sip or its data pointer is NULL
 * fall back to store 0 in per_store_ap after a one-time warning.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);	/* lba %= store sectors */
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
893
/* Return the T10 PI tuple of @sector (modulo store size) within @sip */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);	/* sector %= size */

	return sip->dif_storep + sector;
}
901
902 static void sdebug_max_tgts_luns(void)
903 {
904         struct sdebug_host_info *sdbg_host;
905         struct Scsi_Host *hpnt;
906
907         mutex_lock(&sdebug_host_list_mutex);
908         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
909                 hpnt = sdbg_host->shost;
910                 if ((hpnt->this_id >= 0) &&
911                     (sdebug_num_tgts > hpnt->this_id))
912                         hpnt->max_id = sdebug_num_tgts + 1;
913                 else
914                         hpnt->max_id = sdebug_num_tgts;
915                 /* sdebug_max_luns; */
916                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
917         }
918         mutex_unlock(&sdebug_host_list_mutex);
919 }
920
/* Selector: was an invalid field found in the data-out buffer or the CDB? */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
922
/*
 * Build ILLEGAL REQUEST sense data on @scp pointing at the offending
 * field: @in_byte/@in_bit locate it, @c_d says whether it was in the CDB
 * or the data-out parameter list.
 * Set in_bit to -1 to indicate no bit position of invalid field.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes (field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	/* assemble the SPC "field pointer" sense-key specific bytes */
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;	/* descriptor type: SKS */
		sbuff[sl + 1] = 0x6;	/* additional length */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: SKS bytes live at offset 15 */
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
963
964 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
965 {
966         if (!scp->sense_buffer) {
967                 sdev_printk(KERN_ERR, scp->device,
968                             "%s: sense_buffer is NULL\n", __func__);
969                 return;
970         }
971         memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
972
973         scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
974
975         if (sdebug_verbose)
976                 sdev_printk(KERN_INFO, scp->device,
977                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
978                             my_name, key, asc, asq);
979 }
980
/* Set ILLEGAL REQUEST + INVALID COMMAND OPERATION CODE sense on @scp */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
985
986 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
987                             void __user *arg)
988 {
989         if (sdebug_verbose) {
990                 if (0x1261 == cmd)
991                         sdev_printk(KERN_INFO, dev,
992                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
993                 else if (0x5331 == cmd)
994                         sdev_printk(KERN_INFO, dev,
995                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
996                                     __func__);
997                 else
998                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
999                                     __func__, cmd);
1000         }
1001         return -EINVAL;
1002         /* return -ENOTTY; // correct return but upsets fdisk */
1003 }
1004
1005 static void config_cdb_len(struct scsi_device *sdev)
1006 {
1007         switch (sdebug_cdb_len) {
1008         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1009                 sdev->use_10_for_rw = false;
1010                 sdev->use_16_for_rw = false;
1011                 sdev->use_10_for_ms = false;
1012                 break;
1013         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1014                 sdev->use_10_for_rw = true;
1015                 sdev->use_16_for_rw = false;
1016                 sdev->use_10_for_ms = false;
1017                 break;
1018         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1019                 sdev->use_10_for_rw = true;
1020                 sdev->use_16_for_rw = false;
1021                 sdev->use_10_for_ms = true;
1022                 break;
1023         case 16:
1024                 sdev->use_10_for_rw = false;
1025                 sdev->use_16_for_rw = true;
1026                 sdev->use_10_for_ms = true;
1027                 break;
1028         case 32: /* No knobs to suggest this so same as 16 for now */
1029                 sdev->use_10_for_rw = false;
1030                 sdev->use_16_for_rw = true;
1031                 sdev->use_10_for_ms = true;
1032                 break;
1033         default:
1034                 pr_warn("unexpected cdb_len=%d, force to 10\n",
1035                         sdebug_cdb_len);
1036                 sdev->use_10_for_rw = true;
1037                 sdev->use_16_for_rw = false;
1038                 sdev->use_10_for_ms = false;
1039                 sdebug_cdb_len = 10;
1040                 break;
1041         }
1042 }
1043
1044 static void all_config_cdb_len(void)
1045 {
1046         struct sdebug_host_info *sdbg_host;
1047         struct Scsi_Host *shost;
1048         struct scsi_device *sdev;
1049
1050         mutex_lock(&sdebug_host_list_mutex);
1051         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1052                 shost = sdbg_host->shost;
1053                 shost_for_each_device(sdev, shost) {
1054                         config_cdb_len(sdev);
1055                 }
1056         }
1057         mutex_unlock(&sdebug_host_list_mutex);
1058 }
1059
1060 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1061 {
1062         struct sdebug_host_info *sdhp = devip->sdbg_host;
1063         struct sdebug_dev_info *dp;
1064
1065         list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1066                 if ((devip->sdbg_host == dp->sdbg_host) &&
1067                     (devip->target == dp->target)) {
1068                         clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1069                 }
1070         }
1071 }
1072
/*
 * If any unit attention (UA) is pending on @devip, build the matching
 * sense data in @scp, clear that UA bit and return
 * check_condition_result; otherwise return 0. Only the lowest-numbered
 * pending UA is reported per call.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1158
1159 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1160 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1161                                 int arr_len)
1162 {
1163         int act_len;
1164         struct scsi_data_buffer *sdb = &scp->sdb;
1165
1166         if (!sdb->length)
1167                 return 0;
1168         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1169                 return DID_ERROR << 16;
1170
1171         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1172                                       arr, arr_len);
1173         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1174
1175         return 0;
1176 }
1177
1178 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1179  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1180  * calls, not required to write in ascending offset order. Assumes resid
1181  * set to scsi_bufflen() prior to any calls.
1182  */
1183 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1184                                   int arr_len, unsigned int off_dst)
1185 {
1186         unsigned int act_len, n;
1187         struct scsi_data_buffer *sdb = &scp->sdb;
1188         off_t skip = off_dst;
1189
1190         if (sdb->length <= off_dst)
1191                 return 0;
1192         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1193                 return DID_ERROR << 16;
1194
1195         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1196                                        arr, arr_len, skip);
1197         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1198                  __func__, off_dst, scsi_bufflen(scp), act_len,
1199                  scsi_get_resid(scp));
1200         n = scsi_bufflen(scp) - (off_dst + act_len);
1201         scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1202         return 0;
1203 }
1204
1205 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1206  * 'arr' or -1 if error.
1207  */
1208 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1209                                int arr_len)
1210 {
1211         if (!scsi_bufflen(scp))
1212                 return 0;
1213         if (scp->sc_data_direction != DMA_TO_DEVICE)
1214                 return -1;
1215
1216         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1217 }
1218
1219
/* INQUIRY strings; space padded to the SPC field widths (8/16/4) with a
 * trailing NUL included in each array size.
 */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1227
/*
 * Device identification VPD page (0x83). Returns number of bytes placed
 * in @arr. Emits, in order: a T10 vendor-id designator, then (when
 * @dev_id_num >= 0) a LU designator (UUID if sdebug_uuid_ctl, else
 * NAA-3) and a relative target port designator, then target port,
 * target port group, target device NAA-3 designators and a SCSI name
 * string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes 4-byte header) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	/* "naa." + 16 hex digits of (naa3_comp_a | target_dev_id) */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad to the declared 24 bytes */
	num += 4;
	return num;
}
1315
/* Canned payload for the Software Interface Identification VPD page:
 * three 6-byte entries that differ only in their last byte.
 */
static unsigned char vpd84_data[] = {
	/* from 4th byte */
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/* Software interface identification VPD page (0x84): copy the canned
 * data into @arr and return its length.
 */
static int inquiry_vpd_84(unsigned char *arr)
{
	size_t n = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, n);
	return n;
}
1328
/* Append one network-services descriptor: a 4-byte header followed by
 * @url, null terminated and zero padded up to a multiple of 4 bytes.
 * Returns the new offset into @arr.
 */
static int sdeb_add_nsd(unsigned char *arr, int num, int assoc_type,
			const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;		/* include the null terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = assoc_type;	/* association + service type */
	arr[num++] = 0x0;		/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;		/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage configuration service */
	num = sdeb_add_nsd(arr, num, 0x1, "https://www.kernel.org/config");
	/* lu, logging service */
	num = sdeb_add_nsd(arr, num, 0x4, "http://www.kernel.org/log");

	return num;
}
1363
1364 /* SCSI ports VPD page */
1365 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1366 {
1367         int num = 0;
1368         int port_a, port_b;
1369
1370         port_a = target_dev_id + 1;
1371         port_b = port_a + 1;
1372         arr[num++] = 0x0;       /* reserved */
1373         arr[num++] = 0x0;       /* reserved */
1374         arr[num++] = 0x0;
1375         arr[num++] = 0x1;       /* relative port 1 (primary) */
1376         memset(arr + num, 0, 6);
1377         num += 6;
1378         arr[num++] = 0x0;
1379         arr[num++] = 12;        /* length tp descriptor */
1380         /* naa-5 target port identifier (A) */
1381         arr[num++] = 0x61;      /* proto=sas, binary */
1382         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1383         arr[num++] = 0x0;       /* reserved */
1384         arr[num++] = 0x8;       /* length */
1385         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1386         num += 8;
1387         arr[num++] = 0x0;       /* reserved */
1388         arr[num++] = 0x0;       /* reserved */
1389         arr[num++] = 0x0;
1390         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1391         memset(arr + num, 0, 6);
1392         num += 6;
1393         arr[num++] = 0x0;
1394         arr[num++] = 12;        /* length tp descriptor */
1395         /* naa-5 target port identifier (B) */
1396         arr[num++] = 0x61;      /* proto=sas, binary */
1397         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1398         arr[num++] = 0x0;       /* reserved */
1399         arr[num++] = 0x8;       /* length */
1400         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1401         num += 8;
1402
1403         return num;
1404 }
1405
1406
/*
 * Canned payload returned verbatim by inquiry_vpd_89() for the ATA
 * Information VPD page (0x89).  The leading ASCII runs spell "linux",
 * "SAT scsi_debug" and "1234".  NOTE(review): the remainder appears to
 * be a captured ATA IDENTIFY DEVICE image -- confirm against the page
 * layout before editing any byte.
 */
1407 static unsigned char vpd89_data[] = {
1408 /* from 4th byte */ 0,0,0,0,
1409 'l','i','n','u','x',' ',' ',' ',
1410 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1411 '1','2','3','4',
1412 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1413 0xec,0,0,0,
1414 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1415 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1416 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1417 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1418 0x53,0x41,
1419 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1420 0x20,0x20,
1421 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1422 0x10,0x80,
1423 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1424 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1425 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1427 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1428 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1429 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1434 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1435 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1436 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1449 };
1450
1451 /* ATA Information VPD page */
1452 static int inquiry_vpd_89(unsigned char *arr)
1453 {
1454         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1455         return sizeof(vpd89_data);
1456 }
1457
1458
/*
 * Template for the Block limits VPD page (0xb0) body.  inquiry_vpd_b0()
 * copies it and then overwrites the granularity, transfer length and
 * unmap related fields from the sdebug_* module parameters.
 */
1459 static unsigned char vpdb0_data[] = {
1460         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1461         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1462         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1463         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1464 };
1465
1466 /* Block limits VPD page (SBC-3) */
1467 static int inquiry_vpd_b0(unsigned char *arr)
1468 {
1469         unsigned int gran;
1470
1471         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1472
1473         /* Optimal transfer length granularity */
1474         if (sdebug_opt_xferlen_exp != 0 &&
1475             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1476                 gran = 1 << sdebug_opt_xferlen_exp;
1477         else
1478                 gran = 1 << sdebug_physblk_exp;
1479         put_unaligned_be16(gran, arr + 2);
1480
1481         /* Maximum Transfer Length */
1482         if (sdebug_store_sectors > 0x400)
1483                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1484
1485         /* Optimal Transfer Length */
1486         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1487
1488         if (sdebug_lbpu) {
1489                 /* Maximum Unmap LBA Count */
1490                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1491
1492                 /* Maximum Unmap Block Descriptor Count */
1493                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1494         }
1495
1496         /* Unmap Granularity Alignment */
1497         if (sdebug_unmap_alignment) {
1498                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1499                 arr[28] |= 0x80; /* UGAVALID */
1500         }
1501
1502         /* Optimal Unmap Granularity */
1503         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1504
1505         /* Maximum WRITE SAME Length */
1506         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1507
1508         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1509 }
1510
1511 /* Block device characteristics VPD page (SBC-3) */
1512 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1513 {
1514         memset(arr, 0, 0x3c);
1515         arr[0] = 0;
1516         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1517         arr[2] = 0;
1518         arr[3] = 5;     /* less than 1.8" */
1519         if (devip->zmodel == BLK_ZONED_HA)
1520                 arr[4] = 1 << 4;        /* zoned field = 01b */
1521
1522         return 0x3c;
1523 }
1524
1525 /* Logical block provisioning VPD page (SBC-4) */
1526 static int inquiry_vpd_b2(unsigned char *arr)
1527 {
1528         memset(arr, 0, 0x4);
1529         arr[0] = 0;                     /* threshold exponent */
1530         if (sdebug_lbpu)
1531                 arr[1] = 1 << 7;
1532         if (sdebug_lbpws)
1533                 arr[1] |= 1 << 6;
1534         if (sdebug_lbpws10)
1535                 arr[1] |= 1 << 5;
1536         if (sdebug_lbprz && scsi_debug_lbp())
1537                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1538         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1539         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1540         /* threshold_percentage=0 */
1541         return 0x4;
1542 }
1543
1544 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1545 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1546 {
1547         memset(arr, 0, 0x3c);
1548         arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1549         /*
1550          * Set Optimal number of open sequential write preferred zones and
1551          * Optimal number of non-sequentially written sequential write
1552          * preferred zones fields to 'not reported' (0xffffffff). Leave other
1553          * fields set to zero, apart from Max. number of open swrz_s field.
1554          */
1555         put_unaligned_be32(0xffffffff, &arr[4]);
1556         put_unaligned_be32(0xffffffff, &arr[8]);
1557         if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1558                 put_unaligned_be32(devip->max_open, &arr[12]);
1559         else
1560                 put_unaligned_be32(0xffffffff, &arr[12]);
1561         if (devip->zcap < devip->zsize) {
1562                 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1563                 put_unaligned_be64(devip->zsize, &arr[20]);
1564         } else {
1565                 arr[19] = 0;
1566         }
1567         return 0x3c;
1568 }
1569
/* length of the standard INQUIRY response built at the end of resp_inquiry() */
1570 #define SDEBUG_LONG_INQ_SZ 96
/* size of the scratch buffer; large enough for the biggest VPD page (0x89) */
1571 #define SDEBUG_MAX_INQ_ARR_SZ 584
1572
/*
 * Respond to the SCSI INQUIRY command.
 *
 * When the EVPD bit (cdb[1] bit 0) is set, dispatch on the page code in
 * cdb[2] and build one of the Vital Product Data pages via the
 * inquiry_vpd_*() helpers; an unsupported page code draws ILLEGAL
 * REQUEST sense.  The obsolete CMDDT bit (cdb[1] bit 1) is always
 * rejected.  Otherwise build the 96 byte standard INQUIRY data.  The
 * response is trimmed to the allocation length from cdb bytes 3..4 and
 * copied to the command's data-in buffer.  Returns the result of
 * fill_from_dev_buffer(), check_condition_result on bad cdb fields, or
 * DID_REQUEUE << 16 if the scratch buffer cannot be allocated.
 */
1573 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1574 {
1575         unsigned char pq_pdt;
1576         unsigned char *arr;
1577         unsigned char *cmd = scp->cmnd;
1578         u32 alloc_len, n;
1579         int ret;
1580         bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1581
1582         alloc_len = get_unaligned_be16(cmd + 3);
1583         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1584         if (! arr)
1585                 return DID_REQUEUE << 16;
1586         is_disk = (sdebug_ptype == TYPE_DISK);
1587         is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1588         is_disk_zbc = (is_disk || is_zbc);
1589         have_wlun = scsi_is_wlun(scp->device->lun);
         /* byte 0: peripheral qualifier (high 3 bits) + device type */
1590         if (have_wlun)
1591                 pq_pdt = TYPE_WLUN;     /* present, wlun */
1592         else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1593                 pq_pdt = 0x7f;  /* not present, PQ=3, PDT=0x1f */
1594         else
1595                 pq_pdt = (sdebug_ptype & 0x1f);
1596         arr[0] = pq_pdt;
1597         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1598                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1599                 kfree(arr);
1600                 return check_condition_result;
1601         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1602                 int lu_id_num, port_group_id, target_dev_id;
1603                 u32 len;
1604                 char lu_id_str[6];
1605                 int host_no = devip->sdbg_host->shost->host_no;
1606
1607                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1608                     (devip->channel & 0x7f);
1609                 if (sdebug_vpd_use_hostno == 0)
1610                         host_no = 0;
1611                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1612                             (devip->target * 1000) + devip->lun);
1613                 target_dev_id = ((host_no + 1) * 2000) +
1614                                  (devip->target * 1000) - 3;
1615                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1616                 if (0 == cmd[2]) { /* supported vital product data pages */
1617                         arr[1] = cmd[2];        /*sanity */
1618                         n = 4;
1619                         arr[n++] = 0x0;   /* this page */
1620                         arr[n++] = 0x80;  /* unit serial number */
1621                         arr[n++] = 0x83;  /* device identification */
1622                         arr[n++] = 0x84;  /* software interface ident. */
1623                         arr[n++] = 0x85;  /* management network addresses */
1624                         arr[n++] = 0x86;  /* extended inquiry */
1625                         arr[n++] = 0x87;  /* mode page policy */
1626                         arr[n++] = 0x88;  /* SCSI ports */
1627                         if (is_disk_zbc) {        /* SBC or ZBC */
1628                                 arr[n++] = 0x89;  /* ATA information */
1629                                 arr[n++] = 0xb0;  /* Block limits */
1630                                 arr[n++] = 0xb1;  /* Block characteristics */
1631                                 if (is_disk)
1632                                         arr[n++] = 0xb2;  /* LB Provisioning */
1633                                 if (is_zbc)
1634                                         arr[n++] = 0xb6;  /* ZB dev. char. */
1635                         }
1636                         arr[3] = n - 4;   /* number of supported VPD pages */
1637                 } else if (0x80 == cmd[2]) { /* unit serial number */
1638                         arr[1] = cmd[2];        /*sanity */
1639                         arr[3] = len;
1640                         memcpy(&arr[4], lu_id_str, len);
1641                 } else if (0x83 == cmd[2]) { /* device identification */
1642                         arr[1] = cmd[2];        /*sanity */
1643                         arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1644                                                 target_dev_id, lu_id_num,
1645                                                 lu_id_str, len,
1646                                                 &devip->lu_name);
1647                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1648                         arr[1] = cmd[2];        /*sanity */
1649                         arr[3] = inquiry_vpd_84(&arr[4]);
1650                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1651                         arr[1] = cmd[2];        /*sanity */
1652                         arr[3] = inquiry_vpd_85(&arr[4]);
1653                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1654                         arr[1] = cmd[2];        /*sanity */
1655                         arr[3] = 0x3c;  /* number of following entries */
1656                         if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1657                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1658                         else if (have_dif_prot)
1659                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1660                         else
1661                                 arr[4] = 0x0;   /* no protection stuff */
1662                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1663                 } else if (0x87 == cmd[2]) { /* mode page policy */
1664                         arr[1] = cmd[2];        /*sanity */
1665                         arr[3] = 0x8;   /* number of following entries */
1666                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1667                         arr[6] = 0x80;  /* mlus, shared */
1668                         arr[8] = 0x18;   /* protocol specific lu */
1669                         arr[10] = 0x82;  /* mlus, per initiator port */
1670                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1671                         arr[1] = cmd[2];        /*sanity */
1672                         arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1673                 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1674                         arr[1] = cmd[2];        /*sanity */
1675                         n = inquiry_vpd_89(&arr[4]);
1676                         put_unaligned_be16(n, arr + 2);
1677                 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1678                         arr[1] = cmd[2];        /*sanity */
1679                         arr[3] = inquiry_vpd_b0(&arr[4]);
1680                 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1681                         arr[1] = cmd[2];        /*sanity */
1682                         arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1683                 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1684                         arr[1] = cmd[2];        /*sanity */
1685                         arr[3] = inquiry_vpd_b2(&arr[4]);
1686                 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1687                         arr[1] = cmd[2];        /*sanity */
1688                         arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1689                 } else {
1690                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1691                         kfree(arr);
1692                         return check_condition_result;
1693                 }
                 /* page length field (bytes 2..3) + 4 byte header, capped
                  * by the cdb's allocation length */
1694                 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1695                 ret = fill_from_dev_buffer(scp, arr,
1696                             min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1697                 kfree(arr);
1698                 return ret;
1699         }
1700         /* drops through here for a standard inquiry */
1701         arr[1] = sdebug_removable ? 0x80 : 0;   /* Removable disk */
1702         arr[2] = sdebug_scsi_level;
1703         arr[3] = 2;    /* response_data_format==2 */
1704         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1705         arr[5] = (int)have_dif_prot;    /* PROTECT bit */
1706         if (sdebug_vpd_use_hostno == 0)
1707                 arr[5] |= 0x10; /* claim: implicit TPGS */
1708         arr[6] = 0x10; /* claim: MultiP */
1709         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1710         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1711         memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1712         memcpy(&arr[16], sdebug_inq_product_id, 16);
1713         memcpy(&arr[32], sdebug_inq_product_rev, 4);
1714         /* Use Vendor Specific area to place driver date in ASCII hex */
1715         memcpy(&arr[36], sdebug_version_date, 8);
1716         /* version descriptors (2 bytes each) follow */
1717         put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1718         put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1719         n = 62;
1720         if (is_disk) {          /* SBC-4 no version claimed */
1721                 put_unaligned_be16(0x600, arr + n);
1722                 n += 2;
1723         } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1724                 put_unaligned_be16(0x525, arr + n);
1725                 n += 2;
1726         } else if (is_zbc) {    /* ZBC BSR INCITS 536 revision 05 */
1727                 put_unaligned_be16(0x624, arr + n);
1728                 n += 2;
1729         }
1730         put_unaligned_be16(0x2100, arr + n);    /* SPL-4 no version claimed */
1731         ret = fill_from_dev_buffer(scp, arr,
1732                             min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1733         kfree(arr);
1734         return ret;
1735 }
1736
1737 /* See resp_iec_m_pg() for how this data is manipulated */
/* Informational Exceptions Control mode page [0x1c] current values.
 * resp_requests() below checks byte 2 bit 2 (TEST) and the low nibble
 * of byte 3 (MRIE) to decide whether to fake a THRESHOLD EXCEEDED.
 */
1738 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1739                                    0, 0, 0x0, 0x0};
1740
1741 static int resp_requests(struct scsi_cmnd *scp,
1742                          struct sdebug_dev_info *devip)
1743 {
1744         unsigned char *cmd = scp->cmnd;
1745         unsigned char arr[SCSI_SENSE_BUFFERSIZE];       /* assume >= 18 bytes */
1746         bool dsense = !!(cmd[1] & 1);
1747         u32 alloc_len = cmd[4];
1748         u32 len = 18;
1749         int stopped_state = atomic_read(&devip->stopped);
1750
1751         memset(arr, 0, sizeof(arr));
1752         if (stopped_state > 0) {        /* some "pollable" data [spc6r02: 5.12.2] */
1753                 if (dsense) {
1754                         arr[0] = 0x72;
1755                         arr[1] = NOT_READY;
1756                         arr[2] = LOGICAL_UNIT_NOT_READY;
1757                         arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1758                         len = 8;
1759                 } else {
1760                         arr[0] = 0x70;
1761                         arr[2] = NOT_READY;             /* NO_SENSE in sense_key */
1762                         arr[7] = 0xa;                   /* 18 byte sense buffer */
1763                         arr[12] = LOGICAL_UNIT_NOT_READY;
1764                         arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1765                 }
1766         } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1767                 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1768                 if (dsense) {
1769                         arr[0] = 0x72;
1770                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1771                         arr[2] = THRESHOLD_EXCEEDED;
1772                         arr[3] = 0xff;          /* Failure prediction(false) */
1773                         len = 8;
1774                 } else {
1775                         arr[0] = 0x70;
1776                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1777                         arr[7] = 0xa;           /* 18 byte sense buffer */
1778                         arr[12] = THRESHOLD_EXCEEDED;
1779                         arr[13] = 0xff;         /* Failure prediction(false) */
1780                 }
1781         } else {        /* nothing to report */
1782                 if (dsense) {
1783                         len = 8;
1784                         memset(arr, 0, len);
1785                         arr[0] = 0x72;
1786                 } else {
1787                         memset(arr, 0, len);
1788                         arr[0] = 0x70;
1789                         arr[7] = 0xa;
1790                 }
1791         }
1792         return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1793 }
1794
/*
 * Respond to START STOP UNIT.
 *
 * Only POWER CONDITION 0 is accepted; any other value in the high
 * nibble of cdb[4] draws ILLEGAL REQUEST sense.  want_stop is the
 * inverse of the START bit (cdb[4] bit 0).  A stopped state of 2 means
 * the unit is still inside the tur_ms_to_ready delay measured from
 * devip->create_ts; once that window has elapsed the state becomes 0
 * (ready).  While the delay is still running, a stop request is dummied
 * up as success but a start request is rejected so the delay cannot be
 * cut short.  Returns SDEG_RES_IMMED_MASK when no state change occurred
 * or the IMMED bit (cdb[1] bit 0) is set, 0 when a state change must
 * complete, or check_condition_result on a rejected cdb.
 */
1795 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1796 {
1797         unsigned char *cmd = scp->cmnd;
1798         int power_cond, want_stop, stopped_state;
1799         bool changing;
1800
1801         power_cond = (cmd[4] & 0xf0) >> 4;
1802         if (power_cond) {
1803                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1804                 return check_condition_result;
1805         }
1806         want_stop = !(cmd[4] & 1);
1807         stopped_state = atomic_read(&devip->stopped);
1808         if (stopped_state == 2) {
1809                 ktime_t now_ts = ktime_get_boottime();
1810
1811                 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1812                         u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1813
1814                         if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1815                                 /* tur_ms_to_ready timer extinguished */
1816                                 atomic_set(&devip->stopped, 0);
1817                                 stopped_state = 0;
1818                         }
1819                 }
1820                 if (stopped_state == 2) {
1821                         if (want_stop) {
1822                                 stopped_state = 1;      /* dummy up success */
1823                         } else {        /* Disallow tur_ms_to_ready delay to be overridden */
1824                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1825                                 return check_condition_result;
1826                         }
1827                 }
1828         }
1829         changing = (stopped_state != want_stop);
1830         if (changing)
1831                 atomic_xchg(&devip->stopped, want_stop);
1832         if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1833                 return SDEG_RES_IMMED_MASK;
1834         else
1835                 return 0;
1836 }
1837
1838 static sector_t get_sdebug_capacity(void)
1839 {
1840         static const unsigned int gibibyte = 1073741824;
1841
1842         if (sdebug_virtual_gb > 0)
1843                 return (sector_t)sdebug_virtual_gb *
1844                         (gibibyte / sdebug_sector_size);
1845         else
1846                 return sdebug_store_sectors;
1847 }
1848
1849 #define SDEBUG_READCAP_ARR_SZ 8
1850 static int resp_readcap(struct scsi_cmnd *scp,
1851                         struct sdebug_dev_info *devip)
1852 {
1853         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1854         unsigned int capac;
1855
1856         /* following just in case virtual_gb changed */
1857         sdebug_capacity = get_sdebug_capacity();
1858         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1859         if (sdebug_capacity < 0xffffffff) {
1860                 capac = (unsigned int)sdebug_capacity - 1;
1861                 put_unaligned_be32(capac, arr + 0);
1862         } else
1863                 put_unaligned_be32(0xffffffff, arr + 0);
1864         put_unaligned_be16(sdebug_sector_size, arr + 6);
1865         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1866 }
1867
1868 #define SDEBUG_READCAP16_ARR_SZ 32
1869 static int resp_readcap16(struct scsi_cmnd *scp,
1870                           struct sdebug_dev_info *devip)
1871 {
1872         unsigned char *cmd = scp->cmnd;
1873         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1874         u32 alloc_len;
1875
1876         alloc_len = get_unaligned_be32(cmd + 10);
1877         /* following just in case virtual_gb changed */
1878         sdebug_capacity = get_sdebug_capacity();
1879         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1880         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1881         put_unaligned_be32(sdebug_sector_size, arr + 8);
1882         arr[13] = sdebug_physblk_exp & 0xf;
1883         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1884
1885         if (scsi_debug_lbp()) {
1886                 arr[14] |= 0x80; /* LBPME */
1887                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1888                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1889                  * in the wider field maps to 0 in this field.
1890                  */
1891                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1892                         arr[14] |= 0x40;
1893         }
1894
1895         /*
1896          * Since the scsi_debug READ CAPACITY implementation always reports the
1897          * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1898          */
1899         if (devip->zmodel == BLK_ZONED_HM)
1900                 arr[12] |= 1 << 4;
1901
1902         arr[15] = sdebug_lowest_aligned & 0xff;
1903
1904         if (have_dif_prot) {
1905                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1906                 arr[12] |= 1; /* PROT_EN */
1907         }
1908
1909         return fill_from_dev_buffer(scp, arr,
1910                             min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1911 }
1912
1913 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1914
1915 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1916                               struct sdebug_dev_info *devip)
1917 {
1918         unsigned char *cmd = scp->cmnd;
1919         unsigned char *arr;
1920         int host_no = devip->sdbg_host->shost->host_no;
1921         int port_group_a, port_group_b, port_a, port_b;
1922         u32 alen, n, rlen;
1923         int ret;
1924
1925         alen = get_unaligned_be32(cmd + 6);
1926         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1927         if (! arr)
1928                 return DID_REQUEUE << 16;
1929         /*
1930          * EVPD page 0x88 states we have two ports, one
1931          * real and a fake port with no device connected.
1932          * So we create two port groups with one port each
1933          * and set the group with port B to unavailable.
1934          */
1935         port_a = 0x1; /* relative port A */
1936         port_b = 0x2; /* relative port B */
1937         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1938                         (devip->channel & 0x7f);
1939         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1940                         (devip->channel & 0x7f) + 0x80;
1941
1942         /*
1943          * The asymmetric access state is cycled according to the host_id.
1944          */
1945         n = 4;
1946         if (sdebug_vpd_use_hostno == 0) {
1947                 arr[n++] = host_no % 3; /* Asymm access state */
1948                 arr[n++] = 0x0F; /* claim: all states are supported */
1949         } else {
1950                 arr[n++] = 0x0; /* Active/Optimized path */
1951                 arr[n++] = 0x01; /* only support active/optimized paths */
1952         }
1953         put_unaligned_be16(port_group_a, arr + n);
1954         n += 2;
1955         arr[n++] = 0;    /* Reserved */
1956         arr[n++] = 0;    /* Status code */
1957         arr[n++] = 0;    /* Vendor unique */
1958         arr[n++] = 0x1;  /* One port per group */
1959         arr[n++] = 0;    /* Reserved */
1960         arr[n++] = 0;    /* Reserved */
1961         put_unaligned_be16(port_a, arr + n);
1962         n += 2;
1963         arr[n++] = 3;    /* Port unavailable */
1964         arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1965         put_unaligned_be16(port_group_b, arr + n);
1966         n += 2;
1967         arr[n++] = 0;    /* Reserved */
1968         arr[n++] = 0;    /* Status code */
1969         arr[n++] = 0;    /* Vendor unique */
1970         arr[n++] = 0x1;  /* One port per group */
1971         arr[n++] = 0;    /* Reserved */
1972         arr[n++] = 0;    /* Reserved */
1973         put_unaligned_be16(port_b, arr + n);
1974         n += 2;
1975
1976         rlen = n - 4;
1977         put_unaligned_be32(rlen, arr + 0);
1978
1979         /*
1980          * Return the smallest value of either
1981          * - The allocated length
1982          * - The constructed command length
1983          * - The maximum array size
1984          */
1985         rlen = min(alen, n);
1986         ret = fill_from_dev_buffer(scp, arr,
1987                            min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1988         kfree(arr);
1989         return ret;
1990 }
1991
1992 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1993                              struct sdebug_dev_info *devip)
1994 {
1995         bool rctd;
1996         u8 reporting_opts, req_opcode, sdeb_i, supp;
1997         u16 req_sa, u;
1998         u32 alloc_len, a_len;
1999         int k, offset, len, errsts, count, bump, na;
2000         const struct opcode_info_t *oip;
2001         const struct opcode_info_t *r_oip;
2002         u8 *arr;
2003         u8 *cmd = scp->cmnd;
2004
2005         rctd = !!(cmd[2] & 0x80);
2006         reporting_opts = cmd[2] & 0x7;
2007         req_opcode = cmd[3];
2008         req_sa = get_unaligned_be16(cmd + 4);
2009         alloc_len = get_unaligned_be32(cmd + 6);
2010         if (alloc_len < 4 || alloc_len > 0xffff) {
2011                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2012                 return check_condition_result;
2013         }
2014         if (alloc_len > 8192)
2015                 a_len = 8192;
2016         else
2017                 a_len = alloc_len;
2018         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2019         if (NULL == arr) {
2020                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2021                                 INSUFF_RES_ASCQ);
2022                 return check_condition_result;
2023         }
2024         switch (reporting_opts) {
2025         case 0: /* all commands */
2026                 /* count number of commands */
2027                 for (count = 0, oip = opcode_info_arr;
2028                      oip->num_attached != 0xff; ++oip) {
2029                         if (F_INV_OP & oip->flags)
2030                                 continue;
2031                         count += (oip->num_attached + 1);
2032                 }
2033                 bump = rctd ? 20 : 8;
2034                 put_unaligned_be32(count * bump, arr);
2035                 for (offset = 4, oip = opcode_info_arr;
2036                      oip->num_attached != 0xff && offset < a_len; ++oip) {
2037                         if (F_INV_OP & oip->flags)
2038                                 continue;
2039                         na = oip->num_attached;
2040                         arr[offset] = oip->opcode;
2041                         put_unaligned_be16(oip->sa, arr + offset + 2);
2042                         if (rctd)
2043                                 arr[offset + 5] |= 0x2;
2044                         if (FF_SA & oip->flags)
2045                                 arr[offset + 5] |= 0x1;
2046                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2047                         if (rctd)
2048                                 put_unaligned_be16(0xa, arr + offset + 8);
2049                         r_oip = oip;
2050                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2051                                 if (F_INV_OP & oip->flags)
2052                                         continue;
2053                                 offset += bump;
2054                                 arr[offset] = oip->opcode;
2055                                 put_unaligned_be16(oip->sa, arr + offset + 2);
2056                                 if (rctd)
2057                                         arr[offset + 5] |= 0x2;
2058                                 if (FF_SA & oip->flags)
2059                                         arr[offset + 5] |= 0x1;
2060                                 put_unaligned_be16(oip->len_mask[0],
2061                                                    arr + offset + 6);
2062                                 if (rctd)
2063                                         put_unaligned_be16(0xa,
2064                                                            arr + offset + 8);
2065                         }
2066                         oip = r_oip;
2067                         offset += bump;
2068                 }
2069                 break;
2070         case 1: /* one command: opcode only */
2071         case 2: /* one command: opcode plus service action */
2072         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2073                 sdeb_i = opcode_ind_arr[req_opcode];
2074                 oip = &opcode_info_arr[sdeb_i];
2075                 if (F_INV_OP & oip->flags) {
2076                         supp = 1;
2077                         offset = 4;
2078                 } else {
2079                         if (1 == reporting_opts) {
2080                                 if (FF_SA & oip->flags) {
2081                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2082                                                              2, 2);
2083                                         kfree(arr);
2084                                         return check_condition_result;
2085                                 }
2086                                 req_sa = 0;
2087                         } else if (2 == reporting_opts &&
2088                                    0 == (FF_SA & oip->flags)) {
2089                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2090                                 kfree(arr);     /* point at requested sa */
2091                                 return check_condition_result;
2092                         }
2093                         if (0 == (FF_SA & oip->flags) &&
2094                             req_opcode == oip->opcode)
2095                                 supp = 3;
2096                         else if (0 == (FF_SA & oip->flags)) {
2097                                 na = oip->num_attached;
2098                                 for (k = 0, oip = oip->arrp; k < na;
2099                                      ++k, ++oip) {
2100                                         if (req_opcode == oip->opcode)
2101                                                 break;
2102                                 }
2103                                 supp = (k >= na) ? 1 : 3;
2104                         } else if (req_sa != oip->sa) {
2105                                 na = oip->num_attached;
2106                                 for (k = 0, oip = oip->arrp; k < na;
2107                                      ++k, ++oip) {
2108                                         if (req_sa == oip->sa)
2109                                                 break;
2110                                 }
2111                                 supp = (k >= na) ? 1 : 3;
2112                         } else
2113                                 supp = 3;
2114                         if (3 == supp) {
2115                                 u = oip->len_mask[0];
2116                                 put_unaligned_be16(u, arr + 2);
2117                                 arr[4] = oip->opcode;
2118                                 for (k = 1; k < u; ++k)
2119                                         arr[4 + k] = (k < 16) ?
2120                                                  oip->len_mask[k] : 0xff;
2121                                 offset = 4 + u;
2122                         } else
2123                                 offset = 4;
2124                 }
2125                 arr[1] = (rctd ? 0x80 : 0) | supp;
2126                 if (rctd) {
2127                         put_unaligned_be16(0xa, arr + offset);
2128                         offset += 12;
2129                 }
2130                 break;
2131         default:
2132                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2133                 kfree(arr);
2134                 return check_condition_result;
2135         }
2136         offset = (offset < a_len) ? offset : a_len;
2137         len = (offset < alloc_len) ? offset : alloc_len;
2138         errsts = fill_from_dev_buffer(scp, arr, len);
2139         kfree(arr);
2140         return errsts;
2141 }
2142
2143 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2144                           struct sdebug_dev_info *devip)
2145 {
2146         bool repd;
2147         u32 alloc_len, len;
2148         u8 arr[16];
2149         u8 *cmd = scp->cmnd;
2150
2151         memset(arr, 0, sizeof(arr));
2152         repd = !!(cmd[2] & 0x80);
2153         alloc_len = get_unaligned_be32(cmd + 6);
2154         if (alloc_len < 4) {
2155                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2156                 return check_condition_result;
2157         }
2158         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
2159         arr[1] = 0x1;           /* ITNRS */
2160         if (repd) {
2161                 arr[3] = 0xc;
2162                 len = 16;
2163         } else
2164                 len = 4;
2165
2166         len = (len < alloc_len) ? len : alloc_len;
2167         return fill_from_dev_buffer(scp, arr, len);
2168 }
2169
2170 /* <<Following mode page info copied from ST318451LW>> */
2171
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char def_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					       5, 0, 0xff, 0xff};
	const int pg_sz = sizeof(def_pg);

	memcpy(p, def_pg, pg_sz);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 2, 0, pg_sz - 2);
	return pg_sz;
}
2182
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char def_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					       0, 0, 0, 0, 0, 0, 0, 0};
	const int pg_sz = sizeof(def_pg);

	memcpy(p, def_pg, pg_sz);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 2, 0, pg_sz - 2);
	return pg_sz;
}
2193
2194 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2195 {       /* Format device page for mode_sense */
2196         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2197                                      0, 0, 0, 0, 0, 0, 0, 0,
2198                                      0, 0, 0, 0, 0x40, 0, 0, 0};
2199
2200         memcpy(p, format_pg, sizeof(format_pg));
2201         put_unaligned_be16(sdebug_sectors_per, p + 10);
2202         put_unaligned_be16(sdebug_sector_size, p + 12);
2203         if (sdebug_removable)
2204                 p[20] |= 0x20; /* should agree with INQUIRY */
2205         if (1 == pcontrol)
2206                 memset(p + 2, 0, sizeof(format_pg) - 2);
2207         return sizeof(format_pg);
2208 }
2209
/*
 * Caching mode page (0x8) current values.  Mutable: the WCE bit may be
 * cleared by resp_caching_pg() per sdebug_opts, and the page body can be
 * rewritten by MODE SELECT (see resp_mode_select()).
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
                                     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
                                     0, 0, 0, 0};
2213
2214 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2215 {       /* Caching page for mode_sense */
2216         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2217                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2218         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2219                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2220
2221         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2222                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2223         memcpy(p, caching_pg, sizeof(caching_pg));
2224         if (1 == pcontrol)
2225                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2226         else if (2 == pcontrol)
2227                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2228         return sizeof(caching_pg);
2229 }
2230
/*
 * Control mode page (0xa) current values.  Mutable: D_SENSE and ATO bits
 * are refreshed by resp_ctrl_m_pg() and the page body can be rewritten by
 * MODE SELECT (see resp_mode_select()).
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
                                    0, 0, 0x2, 0x4b};
2233
2234 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2235 {       /* Control mode page for mode_sense */
2236         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2237                                         0, 0, 0, 0};
2238         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2239                                      0, 0, 0x2, 0x4b};
2240
2241         if (sdebug_dsense)
2242                 ctrl_m_pg[2] |= 0x4;
2243         else
2244                 ctrl_m_pg[2] &= ~0x4;
2245
2246         if (sdebug_ato)
2247                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2248
2249         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2250         if (1 == pcontrol)
2251                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2252         else if (2 == pcontrol)
2253                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2254         return sizeof(ctrl_m_pg);
2255 }
2256
2257
2258 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2259 {       /* Informational Exceptions control mode page for mode_sense */
2260         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2261                                        0, 0, 0x0, 0x0};
2262         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2263                                       0, 0, 0x0, 0x0};
2264
2265         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2266         if (1 == pcontrol)
2267                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2268         else if (2 == pcontrol)
2269                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2270         return sizeof(iec_m_pg);
2271 }
2272
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP protocol specific port mode page, short format */
	static const unsigned char def_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
	const int pg_sz = sizeof(def_pg);

	memcpy(p, def_pg, pg_sz);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 2, 0, pg_sz - 2);
	return pg_sz;
}
2283
2284
2285 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2286                               int target_dev_id)
2287 {       /* SAS phy control and discover mode page for mode_sense */
2288         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2289                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2290                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2291                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2292                     0x2, 0, 0, 0, 0, 0, 0, 0,
2293                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2294                     0, 0, 0, 0, 0, 0, 0, 0,
2295                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2296                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2297                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2298                     0x3, 0, 0, 0, 0, 0, 0, 0,
2299                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2300                     0, 0, 0, 0, 0, 0, 0, 0,
2301                 };
2302         int port_a, port_b;
2303
2304         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2305         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2306         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2307         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2308         port_a = target_dev_id + 1;
2309         port_b = port_a + 1;
2310         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2311         put_unaligned_be32(port_a, p + 20);
2312         put_unaligned_be32(port_b, p + 48 + 20);
2313         if (1 == pcontrol)
2314                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2315         return sizeof(sas_pcd_m_pg);
2316 }
2317
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sha_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
					       0, 0, 0, 0, 0, 0, 0, 0,
		};
	const int pg_sz = sizeof(sha_pg);

	memcpy(p, sha_pg, pg_sz);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 4, 0, pg_sz - 4);
	return pg_sz;
}
2329
2330 #define SDEBUG_MAX_MSENSE_SZ 256
2331
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).  Builds the mode parameter
 * header, an optional block descriptor and the requested mode page(s) in
 * arr[], then returns at most alloc_len bytes to the data-in buffer.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
                           struct sdebug_dev_info *devip)
{
        int pcontrol, pcode, subpcode, bd_len;
        unsigned char dev_spec;
        u32 alloc_len, offset, len;
        int target_dev_id;
        int target = scp->device->id;
        unsigned char *ap;
        unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
        unsigned char *cmd = scp->cmnd;
        bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

        dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
        pcontrol = (cmd[2] & 0xc0) >> 6;        /* 0=current 1=changeable 2=default 3=saved */
        pcode = cmd[2] & 0x3f;          /* requested mode page code */
        subpcode = cmd[3];
        msense_6 = (MODE_SENSE == cmd[0]);
        llbaa = msense_6 ? false : !!(cmd[1] & 0x10);   /* LLBAA only in 10-byte CDB */
        is_disk = (sdebug_ptype == TYPE_DISK);
        is_zbc = (devip->zmodel != BLK_ZONED_NONE);
        if ((is_disk || is_zbc) && !dbd)
                bd_len = llbaa ? 16 : 8;
        else
                bd_len = 0;
        alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
        memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
        if (0x3 == pcontrol) {  /* Saving values not supported */
                mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
                return check_condition_result;
        }
        target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
                        (devip->target * 1000) - 3;
        /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
        if (is_disk || is_zbc) {
                dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
                if (sdebug_wp)
                        dev_spec |= 0x80;
        } else
                dev_spec = 0x0;
        /* mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10) */
        if (msense_6) {
                arr[2] = dev_spec;
                arr[3] = bd_len;
                offset = 4;
        } else {
                arr[3] = dev_spec;
                if (16 == bd_len)
                        arr[4] = 0x1;   /* set LONGLBA bit */
                arr[7] = bd_len;        /* assume 255 or less */
                offset = 8;
        }
        ap = arr + offset;
        if ((bd_len > 0) && (!sdebug_capacity))
                sdebug_capacity = get_sdebug_capacity();

        if (8 == bd_len) {
                /* short descriptor: 32-bit block count + block length */
                if (sdebug_capacity > 0xfffffffe)
                        put_unaligned_be32(0xffffffff, ap + 0);
                else
                        put_unaligned_be32(sdebug_capacity, ap + 0);
                put_unaligned_be16(sdebug_sector_size, ap + 6);
                offset += bd_len;
                ap = arr + offset;
        } else if (16 == bd_len) {
                /* long (LLBAA) descriptor: 64-bit block count */
                put_unaligned_be64((u64)sdebug_capacity, ap + 0);
                put_unaligned_be32(sdebug_sector_size, ap + 12);
                offset += bd_len;
                ap = arr + offset;
        }

        /* only page 0x19 supports subpages here; 0xff means "all" */
        if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
                /* TODO: Control Extension page */
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                return check_condition_result;
        }
        bad_pcode = false;

        switch (pcode) {
        case 0x1:       /* Read-Write error recovery page, direct access */
                len = resp_err_recov_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x2:       /* Disconnect-Reconnect page, all devices */
                len = resp_disconnect_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x3:       /* Format device page, direct access */
                if (is_disk) {
                        len = resp_format_pg(ap, pcontrol, target);
                        offset += len;
                } else
                        bad_pcode = true;
                break;
        case 0x8:       /* Caching page, direct access */
                if (is_disk || is_zbc) {
                        len = resp_caching_pg(ap, pcontrol, target);
                        offset += len;
                } else
                        bad_pcode = true;
                break;
        case 0xa:       /* Control Mode page, all devices */
                len = resp_ctrl_m_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x19:      /* if spc==1 then sas phy, control+discover */
                if ((subpcode > 0x2) && (subpcode < 0xff)) {
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                        return check_condition_result;
                }
                len = 0;
                if ((0x0 == subpcode) || (0xff == subpcode))
                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
                if ((0x1 == subpcode) || (0xff == subpcode))
                        len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
                                                  target_dev_id);
                if ((0x2 == subpcode) || (0xff == subpcode))
                        len += resp_sas_sha_m_spg(ap + len, pcontrol);
                offset += len;
                break;
        case 0x1c:      /* Informational Exceptions Mode page, all devices */
                len = resp_iec_m_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x3f:      /* Read all Mode pages */
                if ((0 == subpcode) || (0xff == subpcode)) {
                        len = resp_err_recov_pg(ap, pcontrol, target);
                        len += resp_disconnect_pg(ap + len, pcontrol, target);
                        if (is_disk) {
                                len += resp_format_pg(ap + len, pcontrol,
                                                      target);
                                len += resp_caching_pg(ap + len, pcontrol,
                                                       target);
                        } else if (is_zbc) {
                                len += resp_caching_pg(ap + len, pcontrol,
                                                       target);
                        }
                        len += resp_ctrl_m_pg(ap + len, pcontrol, target);
                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
                        if (0xff == subpcode) {
                                len += resp_sas_pcd_m_spg(ap + len, pcontrol,
                                                  target, target_dev_id);
                                len += resp_sas_sha_m_spg(ap + len, pcontrol);
                        }
                        len += resp_iec_m_pg(ap + len, pcontrol, target);
                        offset += len;
                } else {
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                        return check_condition_result;
                }
                break;
        default:
                bad_pcode = true;
                break;
        }
        if (bad_pcode) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
                return check_condition_result;
        }
        /* now that total size is known, fill in the mode data length */
        if (msense_6)
                arr[0] = offset - 1;
        else
                put_unaligned_be16((offset - 2), arr + 0);
        return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
2496
2497 #define SDEBUG_MAX_MSELECT_SZ 512
2498
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).  Fetches the parameter
 * list from the data-out buffer, validates the header and page framing,
 * and applies changes to the caching (0x8), control (0xa) or IEC (0x1c)
 * mode page globals.  An accepted change records a MODE PARAMETERS
 * CHANGED unit attention for the device.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
                            struct sdebug_dev_info *devip)
{
        int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
        int param_len, res, mpage;
        unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
        unsigned char *cmd = scp->cmnd;
        int mselect6 = (MODE_SELECT == cmd[0]);

        memset(arr, 0, sizeof(arr));
        pf = cmd[1] & 0x10;     /* page format: standard layout required */
        sp = cmd[1] & 0x1;      /* save pages: not supported */
        param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
        if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
                return check_condition_result;
        }
        res = fetch_to_dev_buffer(scp, arr, param_len);
        if (-1 == res)
                return DID_ERROR << 16;
        else if (sdebug_verbose && (res < param_len))
                sdev_printk(KERN_INFO, scp->device,
                            "%s: cdb indicated=%d, IO sent=%d bytes\n",
                            __func__, param_len, res);
        /* mode data length is reserved (zero) in a MODE SELECT list */
        md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
        bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
        off = bd_len + (mselect6 ? 4 : 8);      /* offset of the mode page */
        if (md_len > 2 || off >= res) {
                mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
                return check_condition_result;
        }
        mpage = arr[off] & 0x3f;
        ps = !!(arr[off] & 0x80);       /* PS must be zero in MODE SELECT */
        if (ps) {
                mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
                return check_condition_result;
        }
        spf = !!(arr[off] & 0x40);      /* subpage format */
        pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
                       (arr[off + 1] + 2);
        if ((pg_len + off) > param_len) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST,
                                PARAMETER_LIST_LENGTH_ERR, 0);
                return check_condition_result;
        }
        /* only accept a page whose length matches the stored page */
        switch (mpage) {
        case 0x8:      /* Caching Mode page */
                if (caching_pg[1] == arr[off + 1]) {
                        memcpy(caching_pg + 2, arr + off + 2,
                               sizeof(caching_pg) - 2);
                        goto set_mode_changed_ua;
                }
                break;
        case 0xa:      /* Control Mode page */
                if (ctrl_m_pg[1] == arr[off + 1]) {
                        memcpy(ctrl_m_pg + 2, arr + off + 2,
                               sizeof(ctrl_m_pg) - 2);
                        /* propagate SWP and D_SENSE into module state */
                        if (ctrl_m_pg[4] & 0x8)
                                sdebug_wp = true;
                        else
                                sdebug_wp = false;
                        sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
                        goto set_mode_changed_ua;
                }
                break;
        case 0x1c:      /* Informational Exceptions Mode page */
                if (iec_m_pg[1] == arr[off + 1]) {
                        memcpy(iec_m_pg + 2, arr + off + 2,
                               sizeof(iec_m_pg) - 2);
                        goto set_mode_changed_ua;
                }
                break;
        default:
                break;
        }
        mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
        return check_condition_result;
set_mode_changed_ua:
        set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
        return 0;
}
2580
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd): two fixed parameter descriptors */
	static const unsigned char cur_temp_param[] = {0x0, 0x0, 0x3, 0x2,
						       0x0, 38};
	static const unsigned char ref_temp_param[] = {0x0, 0x1, 0x3, 0x2,
						       0x0, 65};

	memcpy(arr, cur_temp_param, sizeof(cur_temp_param));
	memcpy(arr + sizeof(cur_temp_param), ref_temp_param,
	       sizeof(ref_temp_param));
	return sizeof(cur_temp_param) + sizeof(ref_temp_param);
}
2590
2591 static int resp_ie_l_pg(unsigned char *arr)
2592 {
2593         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2594                 };
2595
2596         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2597         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2598                 arr[4] = THRESHOLD_EXCEEDED;
2599                 arr[5] = 0xff;
2600         }
2601         return sizeof(ie_l_pg);
2602 }
2603
static int resp_env_rep_l_spg(unsigned char *arr)
{	/* Environmental Reporting log subpage (0xd/0x1): fixed contents */
	static const unsigned char tmpl[] = {0x0, 0x0, 0x23, 0x8,
					     0x0, 40, 72, 0xff, 45, 18, 0, 0,
					     0x1, 0x0, 0x23, 0x8,
					     0x0, 55, 72, 35, 55, 45, 0, 0,
		};

	memcpy(arr, tmpl, sizeof(tmpl));
	return sizeof(tmpl);
}
2615
2616 #define SDEBUG_MAX_LSENSE_SZ 512
2617
2618 static int resp_log_sense(struct scsi_cmnd *scp,
2619                           struct sdebug_dev_info *devip)
2620 {
2621         int ppc, sp, pcode, subpcode;
2622         u32 alloc_len, len, n;
2623         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2624         unsigned char *cmd = scp->cmnd;
2625
2626         memset(arr, 0, sizeof(arr));
2627         ppc = cmd[1] & 0x2;
2628         sp = cmd[1] & 0x1;
2629         if (ppc || sp) {
2630                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2631                 return check_condition_result;
2632         }
2633         pcode = cmd[2] & 0x3f;
2634         subpcode = cmd[3] & 0xff;
2635         alloc_len = get_unaligned_be16(cmd + 7);
2636         arr[0] = pcode;
2637         if (0 == subpcode) {
2638                 switch (pcode) {
2639                 case 0x0:       /* Supported log pages log page */
2640                         n = 4;
2641                         arr[n++] = 0x0;         /* this page */
2642                         arr[n++] = 0xd;         /* Temperature */
2643                         arr[n++] = 0x2f;        /* Informational exceptions */
2644                         arr[3] = n - 4;
2645                         break;
2646                 case 0xd:       /* Temperature log page */
2647                         arr[3] = resp_temp_l_pg(arr + 4);
2648                         break;
2649                 case 0x2f:      /* Informational exceptions log page */
2650                         arr[3] = resp_ie_l_pg(arr + 4);
2651                         break;
2652                 default:
2653                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654                         return check_condition_result;
2655                 }
2656         } else if (0xff == subpcode) {
2657                 arr[0] |= 0x40;
2658                 arr[1] = subpcode;
2659                 switch (pcode) {
2660                 case 0x0:       /* Supported log pages and subpages log page */
2661                         n = 4;
2662                         arr[n++] = 0x0;
2663                         arr[n++] = 0x0;         /* 0,0 page */
2664                         arr[n++] = 0x0;
2665                         arr[n++] = 0xff;        /* this page */
2666                         arr[n++] = 0xd;
2667                         arr[n++] = 0x0;         /* Temperature */
2668                         arr[n++] = 0xd;
2669                         arr[n++] = 0x1;         /* Environment reporting */
2670                         arr[n++] = 0xd;
2671                         arr[n++] = 0xff;        /* all 0xd subpages */
2672                         arr[n++] = 0x2f;
2673                         arr[n++] = 0x0; /* Informational exceptions */
2674                         arr[n++] = 0x2f;
2675                         arr[n++] = 0xff;        /* all 0x2f subpages */
2676                         arr[3] = n - 4;
2677                         break;
2678                 case 0xd:       /* Temperature subpages */
2679                         n = 4;
2680                         arr[n++] = 0xd;
2681                         arr[n++] = 0x0;         /* Temperature */
2682                         arr[n++] = 0xd;
2683                         arr[n++] = 0x1;         /* Environment reporting */
2684                         arr[n++] = 0xd;
2685                         arr[n++] = 0xff;        /* these subpages */
2686                         arr[3] = n - 4;
2687                         break;
2688                 case 0x2f:      /* Informational exceptions subpages */
2689                         n = 4;
2690                         arr[n++] = 0x2f;
2691                         arr[n++] = 0x0;         /* Informational exceptions */
2692                         arr[n++] = 0x2f;
2693                         arr[n++] = 0xff;        /* these subpages */
2694                         arr[3] = n - 4;
2695                         break;
2696                 default:
2697                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2698                         return check_condition_result;
2699                 }
2700         } else if (subpcode > 0) {
2701                 arr[0] |= 0x40;
2702                 arr[1] = subpcode;
2703                 if (pcode == 0xd && subpcode == 1)
2704                         arr[3] = resp_env_rep_l_spg(arr + 4);
2705                 else {
2706                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2707                         return check_condition_result;
2708                 }
2709         } else {
2710                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2711                 return check_condition_result;
2712         }
2713         len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2714         return fill_from_dev_buffer(scp, arr,
2715                     min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2716 }
2717
2718 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2719 {
2720         return devip->nr_zones != 0;
2721 }
2722
/*
 * Return the zone state entry containing logical block address @lba.
 * When the zone capacity is less than the zone size, the zstate array
 * interleaves gap zones with sequential zones, which the index
 * arithmetic below accounts for.
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
                                        unsigned long long lba)
{
        u32 zno = lba >> devip->zsize_shift;
        struct sdeb_zone_state *zsp;

        /* no gap zones: the shifted LBA indexes the array directly */
        if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
                return &devip->zstate[zno];

        /*
         * If the zone capacity is less than the zone size, adjust for gap
         * zones.
         */
        zno = 2 * zno - devip->nr_conv_zones;
        WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
        zsp = &devip->zstate[zno];
        /* lba may fall in the gap zone that follows the computed zone */
        if (lba >= zsp->z_start + zsp->z_size)
                zsp++;
        WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
        return zsp;
}
2744
2745 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2746 {
2747         return zsp->z_type == ZBC_ZTYPE_CNV;
2748 }
2749
2750 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2751 {
2752         return zsp->z_type == ZBC_ZTYPE_GAP;
2753 }
2754
2755 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2756 {
2757         return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2758 }
2759
2760 static void zbc_close_zone(struct sdebug_dev_info *devip,
2761                            struct sdeb_zone_state *zsp)
2762 {
2763         enum sdebug_z_cond zc;
2764
2765         if (!zbc_zone_is_seq(zsp))
2766                 return;
2767
2768         zc = zsp->z_cond;
2769         if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2770                 return;
2771
2772         if (zc == ZC2_IMPLICIT_OPEN)
2773                 devip->nr_imp_open--;
2774         else
2775                 devip->nr_exp_open--;
2776
2777         if (zsp->z_wp == zsp->z_start) {
2778                 zsp->z_cond = ZC1_EMPTY;
2779         } else {
2780                 zsp->z_cond = ZC4_CLOSED;
2781                 devip->nr_closed++;
2782         }
2783 }
2784
2785 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2786 {
2787         struct sdeb_zone_state *zsp = &devip->zstate[0];
2788         unsigned int i;
2789
2790         for (i = 0; i < devip->nr_zones; i++, zsp++) {
2791                 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2792                         zbc_close_zone(devip, zsp);
2793                         return;
2794                 }
2795         }
2796 }
2797
/*
 * Open a sequential zone, either explicitly (e.g. OPEN ZONE command) or
 * implicitly (write to a closed/empty zone). When the max_open limit
 * would be exceeded, room is made first by closing an implicitly open
 * zone. Counters nr_imp_open/nr_exp_open/nr_closed track transitions.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	/* Already open in the requested mode: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
2828
2829 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2830                                      struct sdeb_zone_state *zsp)
2831 {
2832         switch (zsp->z_cond) {
2833         case ZC2_IMPLICIT_OPEN:
2834                 devip->nr_imp_open--;
2835                 break;
2836         case ZC3_EXPLICIT_OPEN:
2837                 devip->nr_exp_open--;
2838                 break;
2839         default:
2840                 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2841                           zsp->z_start, zsp->z_cond);
2842                 break;
2843         }
2844         zsp->z_cond = ZC5_FULL;
2845 }
2846
/*
 * Advance the write pointer(s) for a write of @num blocks at @lba.
 * Sequential-write-required (SWR) zones just advance the WP, since the
 * access checks guarantee the write starts at it. Sequential-write-
 * preferred zones accept out-of-order writes, which may span several
 * zones and flag use of a non-sequential resource.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	/* Preferred zones: walk the affected zones one at a time */
	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write reaches the zone end: clamp, then cross over */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely below the WP: nothing to advance */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2888
/*
 * ZBC-specific access checks for a read or write of @num blocks at @lba.
 * Returns 0 when the access is allowed; otherwise sets up sense data on
 * @scp and returns check_condition_result.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Host-aware devices place no restrictions on reads */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ...but they must not spill into a non-conventional zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		/* Fail if all open slots are held by explicitly open zones */
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2965
/*
 * Generic access checks (capacity, transfer length, write protect) for a
 * read or write of @num blocks at @lba, delegating to the ZBC checks for
 * zoned devices. Returns 0 when the access is allowed; otherwise sets up
 * sense data and returns check_condition_result.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
2992
2993 /*
2994  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2995  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2996  * that access any of the "stores" in struct sdeb_store_info should call this
2997  * function with bug_if_fake_rw set to true.
2998  */
2999 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3000                                                 bool bug_if_fake_rw)
3001 {
3002         if (sdebug_fake_rw) {
3003                 BUG_ON(bug_if_fake_rw); /* See note above */
3004                 return NULL;
3005         }
3006         return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3007 }
3008
/*
 * Copy data between the command's scatter-gather list and the backing
 * store (sip->storep). The store behaves as a ring: accesses beyond
 * sdebug_store_sectors wrap around to the start of the store.
 * Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;	/* rest: blocks wrapping past the store end */
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* block = lba % sdebug_store_sectors (do_div leaves quotient in lba) */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* Wrapped portion continues at the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
3051
3052 /* Returns number of bytes copied or -1 if error. */
3053 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3054 {
3055         struct scsi_data_buffer *sdb = &scp->sdb;
3056
3057         if (!sdb->length)
3058                 return 0;
3059         if (scp->sc_data_direction != DMA_TO_DEVICE)
3060                 return -1;
3061         return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3062                               num * sdebug_sector_size, 0, true);
3063 }
3064
3065 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3066  * arr into sip->storep+lba and return true. If comparison fails then
3067  * return false. */
3068 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3069                               const u8 *arr, bool compare_only)
3070 {
3071         bool res;
3072         u64 block, rest = 0;
3073         u32 store_blks = sdebug_store_sectors;
3074         u32 lb_size = sdebug_sector_size;
3075         u8 *fsp = sip->storep;
3076
3077         block = do_div(lba, store_blks);
3078         if (block + num > store_blks)
3079                 rest = block + num - store_blks;
3080
3081         res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3082         if (!res)
3083                 return res;
3084         if (rest)
3085                 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3086                              rest * lb_size);
3087         if (!res)
3088                 return res;
3089         if (compare_only)
3090                 return true;
3091         arr += num * lb_size;
3092         memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3093         if (rest)
3094                 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3095         return res;
3096 }
3097
3098 static __be16 dif_compute_csum(const void *buf, int len)
3099 {
3100         __be16 csum;
3101
3102         if (sdebug_guard)
3103                 csum = (__force __be16)ip_compute_csum(buf, len);
3104         else
3105                 csum = cpu_to_be16(crc_t10dif(buf, len));
3106
3107         return csum;
3108 }
3109
/*
 * Verify one protection information tuple against the sector data.
 * Returns 0 on success, 0x01 on a guard tag mismatch and 0x03 on a
 * reference tag mismatch (the values double as the ASCQ reported by
 * the callers).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: ref tag must equal the low 32 bits of the sector number */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: ref tag must equal the expected initial LBA from the cdb */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3136
/*
 * Copy protection information between the command's protection sgl and
 * the PI store (sip->dif_storep); @read selects the direction. Like the
 * data store, the PI store wraps around at sdebug_store_sectors.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;	/* bytes wrapping past the store end */

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* Wrapped bytes continue at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3182
/*
 * Verify the stored protection information for @sectors sectors starting
 * at @start_sec, then copy the PI into the command's protection sgl.
 * Returns 0 on success, or the dif_verify() error code (0x01 guard,
 * 0x03 ref tag) of the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* An app tag of 0xffff means "do not check this sector" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
3222
3223 static inline void
3224 sdeb_read_lock(struct sdeb_store_info *sip)
3225 {
3226         if (sdebug_no_rwlock) {
3227                 if (sip)
3228                         __acquire(&sip->macc_lck);
3229                 else
3230                         __acquire(&sdeb_fake_rw_lck);
3231         } else {
3232                 if (sip)
3233                         read_lock(&sip->macc_lck);
3234                 else
3235                         read_lock(&sdeb_fake_rw_lck);
3236         }
3237 }
3238
3239 static inline void
3240 sdeb_read_unlock(struct sdeb_store_info *sip)
3241 {
3242         if (sdebug_no_rwlock) {
3243                 if (sip)
3244                         __release(&sip->macc_lck);
3245                 else
3246                         __release(&sdeb_fake_rw_lck);
3247         } else {
3248                 if (sip)
3249                         read_unlock(&sip->macc_lck);
3250                 else
3251                         read_unlock(&sdeb_fake_rw_lck);
3252         }
3253 }
3254
3255 static inline void
3256 sdeb_write_lock(struct sdeb_store_info *sip)
3257 {
3258         if (sdebug_no_rwlock) {
3259                 if (sip)
3260                         __acquire(&sip->macc_lck);
3261                 else
3262                         __acquire(&sdeb_fake_rw_lck);
3263         } else {
3264                 if (sip)
3265                         write_lock(&sip->macc_lck);
3266                 else
3267                         write_lock(&sdeb_fake_rw_lck);
3268         }
3269 }
3270
3271 static inline void
3272 sdeb_write_unlock(struct sdeb_store_info *sip)
3273 {
3274         if (sdebug_no_rwlock) {
3275                 if (sip)
3276                         __release(&sip->macc_lck);
3277                 else
3278                         __release(&sdeb_fake_rw_lck);
3279         } else {
3280                 if (sip)
3281                         write_unlock(&sip->macc_lck);
3282                 else
3283                         write_unlock(&sdeb_fake_rw_lck);
3284         }
3285 }
3286
/*
 * Respond to READ(6)/(10)/(12)/(16)/(32) and XDWRITEREAD(10): decode the
 * cdb, validate protection settings and access parameters, apply any
 * requested error injection, then copy data from the backing store into
 * the command's buffer.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Decode lba/num (and the expected initial ref tag for READ(32)) */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 protection rejects a nonzero RDPROTECT field here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Optional injection: pretend only half the data was transferred */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Optional injection of an unrecoverable medium error */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Optional injection of recovered / DIF / DIX errors */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3429
/*
 * Verify the protection information supplied with a protected WRITE by
 * walking the data and protection scatter-gather lists in lockstep, then
 * commit the PI to the PI store. Returns 0 on success, or the
 * dif_verify() error code of the first failing sector.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset into the current protection page */
	int dpage_offset;	/* offset into the current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* WRPROTECT=3 means "do not check", just store */
			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All sectors verified: commit the PI to the store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3501
/*
 * Convert @lba to its index in the provisioning bitmap, compensating for
 * the unmap alignment offset of the first granule.
 */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);	/* lba /= granularity */
	return lba;
}
3509
/* Inverse of lba_to_map_index(): first LBA of the granule at @index. */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
3518
/*
 * Report whether @lba is mapped (1) or unmapped (0), and store in *num
 * the number of consecutive blocks starting at @lba that share that
 * state (clamped to the end of the store).
 */
static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	/* Find where the current run of same-state granules ends */
	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
3539
3540 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3541                        unsigned int len)
3542 {
3543         sector_t end = lba + len;
3544
3545         while (lba < end) {
3546                 unsigned long index = lba_to_map_index(lba);
3547
3548                 if (index < map_size)
3549                         set_bit(index, sip->map_storep);
3550
3551                 lba = map_index_to_lba(index + 1);
3552         }
3553 }
3554
3555 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3556                          unsigned int len)
3557 {
3558         sector_t end = lba + len;
3559         u8 *fsp = sip->storep;
3560
3561         while (lba < end) {
3562                 unsigned long index = lba_to_map_index(lba);
3563
3564                 if (lba == map_index_to_lba(index) &&
3565                     lba + sdebug_unmap_granularity <= end &&
3566                     index < map_size) {
3567                         clear_bit(index, sip->map_storep);
3568                         if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3569                                 memset(fsp + lba * sdebug_sector_size,
3570                                        (sdebug_lbprz & 1) ? 0 : 0xff,
3571                                        sdebug_sector_size *
3572                                        sdebug_unmap_granularity);
3573                         }
3574                         if (sip->dif_storep) {
3575                                 memset(sip->dif_storep + lba, 0xff,
3576                                        sizeof(*sip->dif_storep) *
3577                                        sdebug_unmap_granularity);
3578                         }
3579                 }
3580                 lba = map_index_to_lba(index + 1);
3581         }
3582 }
3583
/*
 * Respond to WRITE(6/10/12/16), XDWRITEREAD(10) and (by default)
 * WRITE(32). Decodes the cdb, optionally verifies DIF/DIX protection
 * information, copies the data-out buffer into the fake store and
 * finally applies configured error injection. Returns 0 on success,
 * otherwise a check-condition style result or DID_ERROR.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to write */
	u32 ei_lba;	/* expected initial LBA (type 2 protection only) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Pull LBA, transfer length and protection-check policy from cdb */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		/* WRITE(6) carries a 21 bit LBA split across bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length of 0 means 256 blocks per SBC */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* WRPROTECT sanity: invalid for type 2, warn-only for types 1 and 3 */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	sdeb_write_lock(sip);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	/* mark the written range as provisioned when LBP is active */
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_write_unlock(sip);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* Error injection (one-shot) after an otherwise successful write */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3715
3716 /*
3717  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3718  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3719  */
/*
 * Respond to WRITE SCATTERED(16) and WRITE SCATTERED(32). Fetches the
 * parameter list header plus LBA range descriptors (lrdp), then writes
 * each descriptor's range from the data that follows the descriptors in
 * the data-out buffer.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* holds header + LBA range descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* WRITE SCATTERED(32) */
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header + all descriptors must fit in the LB data offset area */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;	/* write data starts after the descriptors */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* walk each LBA range descriptor; first one is after the header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		/* expected initial LBA only present in the 32 byte variant */
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* One-shot error injection, same policy as resp_write_dt0() */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
3881
/*
 * Common worker for WRITE SAME(10) and WRITE SAME(16). When @unmap is
 * set and logical block provisioning is active, the range is simply
 * deallocated. Otherwise one logical block is obtained (all zeroes when
 * @ndob is set, else fetched from the data-out buffer) and replicated
 * across [lba, lba+num).
 * NOTE(review): @ei_lba is accepted but currently unused here.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;	/* points at the first (template) block in the store */
	u8 *fsp;
	
	sdeb_write_lock(sip);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	/* do_div() mutates lbaa; block is lba modulo the store size */
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		sdeb_write_unlock(sip);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	sdeb_write_unlock(sip);

	return 0;
}
3943
3944 static int resp_write_same_10(struct scsi_cmnd *scp,
3945                               struct sdebug_dev_info *devip)
3946 {
3947         u8 *cmd = scp->cmnd;
3948         u32 lba;
3949         u16 num;
3950         u32 ei_lba = 0;
3951         bool unmap = false;
3952
3953         if (cmd[1] & 0x8) {
3954                 if (sdebug_lbpws10 == 0) {
3955                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3956                         return check_condition_result;
3957                 } else
3958                         unmap = true;
3959         }
3960         lba = get_unaligned_be32(cmd + 2);
3961         num = get_unaligned_be16(cmd + 7);
3962         if (num > sdebug_write_same_length) {
3963                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3964                 return check_condition_result;
3965         }
3966         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3967 }
3968
3969 static int resp_write_same_16(struct scsi_cmnd *scp,
3970                               struct sdebug_dev_info *devip)
3971 {
3972         u8 *cmd = scp->cmnd;
3973         u64 lba;
3974         u32 num;
3975         u32 ei_lba = 0;
3976         bool unmap = false;
3977         bool ndob = false;
3978
3979         if (cmd[1] & 0x8) {     /* UNMAP */
3980                 if (sdebug_lbpws == 0) {
3981                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3982                         return check_condition_result;
3983                 } else
3984                         unmap = true;
3985         }
3986         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3987                 ndob = true;
3988         lba = get_unaligned_be64(cmd + 2);
3989         num = get_unaligned_be32(cmd + 10);
3990         if (num > sdebug_write_same_length) {
3991                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3992                 return check_condition_result;
3993         }
3994         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3995 }
3996
3997 /* Note the mode field is in the same position as the (lower) service action
3998  * field. For the Report supported operation codes command, SPC-4 suggests
3999  * each mode of this command should be reported separately; for future. */
4000 static int resp_write_buffer(struct scsi_cmnd *scp,
4001                              struct sdebug_dev_info *devip)
4002 {
4003         u8 *cmd = scp->cmnd;
4004         struct scsi_device *sdp = scp->device;
4005         struct sdebug_dev_info *dp;
4006         u8 mode;
4007
4008         mode = cmd[1] & 0x1f;
4009         switch (mode) {
4010         case 0x4:       /* download microcode (MC) and activate (ACT) */
4011                 /* set UAs on this device only */
4012                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4013                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4014                 break;
4015         case 0x5:       /* download MC, save and ACT */
4016                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4017                 break;
4018         case 0x6:       /* download MC with offsets and ACT */
4019                 /* set UAs on most devices (LUs) in this target */
4020                 list_for_each_entry(dp,
4021                                     &devip->sdbg_host->dev_info_list,
4022                                     dev_list)
4023                         if (dp->target == sdp->id) {
4024                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4025                                 if (devip != dp)
4026                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4027                                                 dp->uas_bm);
4028                         }
4029                 break;
4030         case 0x7:       /* download MC with offsets, save, and ACT */
4031                 /* set UA on all devices (LUs) in this target */
4032                 list_for_each_entry(dp,
4033                                     &devip->sdbg_host->dev_info_list,
4034                                     dev_list)
4035                         if (dp->target == sdp->id)
4036                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4037                                         dp->uas_bm);
4038                 break;
4039         default:
4040                 /* do nothing for this command for other mode values */
4041                 break;
4042         }
4043         return 0;
4044 }
4045
/*
 * COMPARE AND WRITE: fetches 2*num blocks from the data-out buffer
 * (the first half is the verify data, the second half the write data),
 * compares the verify half against the store and, on a match, writes
 * the second half. A mismatch yields MISCOMPARE sense.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* holds verify data followed by write data */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;	/* total blocks to fetch: verify + write halves */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* WRPROTECT must be zero for type 2 protection */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compare first half; on match, write second half into the store */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}
4106
/* One LBA range descriptor in an UNMAP parameter list (SBC big-endian) */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to unmap */
	__be32	blocks;		/* number of blocks in the range */
	__be32	__reserved;
};
4112
4113 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4114 {
4115         unsigned char *buf;
4116         struct unmap_block_desc *desc;
4117         struct sdeb_store_info *sip = devip2sip(devip, true);
4118         unsigned int i, payload_len, descriptors;
4119         int ret;
4120
4121         if (!scsi_debug_lbp())
4122                 return 0;       /* fib and say its done */
4123         payload_len = get_unaligned_be16(scp->cmnd + 7);
4124         BUG_ON(scsi_bufflen(scp) != payload_len);
4125
4126         descriptors = (payload_len - 8) / 16;
4127         if (descriptors > sdebug_unmap_max_desc) {
4128                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4129                 return check_condition_result;
4130         }
4131
4132         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4133         if (!buf) {
4134                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4135                                 INSUFF_RES_ASCQ);
4136                 return check_condition_result;
4137         }
4138
4139         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4140
4141         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4142         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4143
4144         desc = (void *)&buf[8];
4145
4146         sdeb_write_lock(sip);
4147
4148         for (i = 0 ; i < descriptors ; i++) {
4149                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4150                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4151
4152                 ret = check_device_access_params(scp, lba, num, true);
4153                 if (ret)
4154                         goto out;
4155
4156                 unmap_region(sip, lba, num);
4157         }
4158
4159         ret = 0;
4160
4161 out:
4162         sdeb_write_unlock(sip);
4163         kfree(buf);
4164
4165         return ret;
4166 }
4167
4168 #define SDEBUG_GET_LBA_STATUS_LEN 32
4169
4170 static int resp_get_lba_status(struct scsi_cmnd *scp,
4171                                struct sdebug_dev_info *devip)
4172 {
4173         u8 *cmd = scp->cmnd;
4174         u64 lba;
4175         u32 alloc_len, mapped, num;
4176         int ret;
4177         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4178
4179         lba = get_unaligned_be64(cmd + 2);
4180         alloc_len = get_unaligned_be32(cmd + 10);
4181
4182         if (alloc_len < 24)
4183                 return 0;
4184
4185         ret = check_device_access_params(scp, lba, 1, false);
4186         if (ret)
4187                 return ret;
4188
4189         if (scsi_debug_lbp()) {
4190                 struct sdeb_store_info *sip = devip2sip(devip, true);
4191
4192                 mapped = map_state(sip, lba, &num);
4193         } else {
4194                 mapped = 1;
4195                 /* following just in case virtual_gb changed */
4196                 sdebug_capacity = get_sdebug_capacity();
4197                 if (sdebug_capacity - lba <= 0xffffffff)
4198                         num = sdebug_capacity - lba;
4199                 else
4200                         num = 0xffffffff;
4201         }
4202
4203         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4204         put_unaligned_be32(20, arr);            /* Parameter Data Length */
4205         put_unaligned_be64(lba, arr + 8);       /* LBA */
4206         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
4207         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
4208
4209         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4210 }
4211
4212 static int resp_sync_cache(struct scsi_cmnd *scp,
4213                            struct sdebug_dev_info *devip)
4214 {
4215         int res = 0;
4216         u64 lba;
4217         u32 num_blocks;
4218         u8 *cmd = scp->cmnd;
4219
4220         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
4221                 lba = get_unaligned_be32(cmd + 2);
4222                 num_blocks = get_unaligned_be16(cmd + 7);
4223         } else {                                /* SYNCHRONIZE_CACHE(16) */
4224                 lba = get_unaligned_be64(cmd + 2);
4225                 num_blocks = get_unaligned_be32(cmd + 10);
4226         }
4227         if (lba + num_blocks > sdebug_capacity) {
4228                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4229                 return check_condition_result;
4230         }
4231         if (!write_since_sync || (cmd[1] & 0x2))
4232                 res = SDEG_RES_IMMED_MASK;
4233         else            /* delay if write_since_sync and IMMED clear */
4234                 write_since_sync = false;
4235         return res;
4236 }
4237
4238 /*
4239  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4240  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4241  * a GOOD status otherwise. Model a disk with a big cache and yield
4242  * CONDITION MET. Actually tries to bring range in main memory into the
4243  * cache associated with the CPU(s).
4244  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: blocks that wrap past store end */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() mutates lba; block is lba modulo the store size */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	/* the wrapped-around portion starts at the front of the store */
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit set */
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4286
4287 #define RL_BUCKET_ELEMS 8
4288
4289 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4290  * (W-LUN), the normal Linux scanning logic does not associate it with a
4291  * device (e.g. /dev/sg7). The following magic will make that association:
4292  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4293  * where <n> is a host number. If there are multiple targets in a host then
4294  * the above will associate a W-LUN to each target. To only get a W-LUN
4295  * for target 2, then use "echo '- 2 49409' > scan" .
4296  */
/*
 * REPORT LUNS handler. The response is built in small fixed-size
 * buckets (arr, RL_BUCKET_ELEMS entries of 8 bytes each) so that an
 * arbitrarily long LUN list can be produced from a stack buffer; each
 * full bucket is copied out (p_fill_from_dev_buffer) before reuse.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;	/* running offset into data-in buffer */
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;	/* LUN 0 suppressed, numbering starts at 1 */

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 holds the 8 byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		/* partially filled bucket: keep j/lun_p for W-LUN below */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LU in the last bucket */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4389
/*
 * VERIFY(10) and VERIFY(16) handler. BYTCHK=0: claim the internal
 * verify succeeded without any media access. BYTCHK=2: rejected as an
 * invalid CDB field. BYTCHK=1: compare the whole data-out buffer
 * against the store. BYTCHK=3: one block is sent and compared against
 * every block in the LBA range.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	a_num = is_bytchk3 ? 1 : vnum;	/* blocks expected from data-out */
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* buffer sized for the full comparison range (vnum blocks) */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		/* short data-out transfer: note it but compare anyway */
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single sent block across the whole range */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
4465
4466 #define RZONES_DESC_HD 64
4467
4468 /* Report zones depending on start LBA and reporting options */
4469 static int resp_report_zones(struct scsi_cmnd *scp,
4470                              struct sdebug_dev_info *devip)
4471 {
4472         unsigned int rep_max_zones, nrz = 0;
4473         int ret = 0;
4474         u32 alloc_len, rep_opts, rep_len;
4475         bool partial;
4476         u64 lba, zs_lba;
4477         u8 *arr = NULL, *desc;
4478         u8 *cmd = scp->cmnd;
4479         struct sdeb_zone_state *zsp = NULL;
4480         struct sdeb_store_info *sip = devip2sip(devip, false);
4481
4482         if (!sdebug_dev_is_zoned(devip)) {
4483                 mk_sense_invalid_opcode(scp);
4484                 return check_condition_result;
4485         }
4486         zs_lba = get_unaligned_be64(cmd + 2);
4487         alloc_len = get_unaligned_be32(cmd + 10);
4488         if (alloc_len == 0)
4489                 return 0;       /* not an error */
4490         rep_opts = cmd[14] & 0x3f;
4491         partial = cmd[14] & 0x80;
4492
4493         if (zs_lba >= sdebug_capacity) {
4494                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4495                 return check_condition_result;
4496         }
4497
4498         rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4499
4500         arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4501         if (!arr) {
4502                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4503                                 INSUFF_RES_ASCQ);
4504                 return check_condition_result;
4505         }
4506
4507         sdeb_read_lock(sip);
4508
4509         desc = arr + 64;
4510         for (lba = zs_lba; lba < sdebug_capacity;
4511              lba = zsp->z_start + zsp->z_size) {
4512                 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4513                         break;
4514                 zsp = zbc_zone(devip, lba);
4515                 switch (rep_opts) {
4516                 case 0x00:
4517                         /* All zones */
4518                         break;
4519                 case 0x01:
4520                         /* Empty zones */
4521                         if (zsp->z_cond != ZC1_EMPTY)
4522                                 continue;
4523                         break;
4524                 case 0x02:
4525                         /* Implicit open zones */
4526                         if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4527                                 continue;
4528                         break;
4529                 case 0x03:
4530                         /* Explicit open zones */
4531                         if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4532                                 continue;
4533                         break;
4534                 case 0x04:
4535                         /* Closed zones */
4536                         if (zsp->z_cond != ZC4_CLOSED)
4537                                 continue;
4538                         break;
4539                 case 0x05:
4540                         /* Full zones */
4541                         if (zsp->z_cond != ZC5_FULL)
4542                                 continue;
4543                         break;
4544                 case 0x06:
4545                 case 0x07:
4546                 case 0x10:
4547                         /*
4548                          * Read-only, offline, reset WP recommended are
4549                          * not emulated: no zones to report;
4550                          */
4551                         continue;
4552                 case 0x11:
4553                         /* non-seq-resource set */
4554                         if (!zsp->z_non_seq_resource)
4555                                 continue;
4556                         break;
4557                 case 0x3e:
4558                         /* All zones except gap zones. */
4559                         if (zbc_zone_is_gap(zsp))
4560                                 continue;
4561                         break;
4562                 case 0x3f:
4563                         /* Not write pointer (conventional) zones */
4564                         if (zbc_zone_is_seq(zsp))
4565                                 continue;
4566                         break;
4567                 default:
4568                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
4569                                         INVALID_FIELD_IN_CDB, 0);
4570                         ret = check_condition_result;
4571                         goto fini;
4572                 }
4573
4574                 if (nrz < rep_max_zones) {
4575                         /* Fill zone descriptor */
4576                         desc[0] = zsp->z_type;
4577                         desc[1] = zsp->z_cond << 4;
4578                         if (zsp->z_non_seq_resource)
4579                                 desc[1] |= 1 << 1;
4580                         put_unaligned_be64((u64)zsp->z_size, desc + 8);
4581                         put_unaligned_be64((u64)zsp->z_start, desc + 16);
4582                         put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4583                         desc += 64;
4584                 }
4585
4586                 if (partial && nrz >= rep_max_zones)
4587                         break;
4588
4589                 nrz++;
4590         }
4591
4592         /* Report header */
4593         /* Zone list length. */
4594         put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4595         /* Maximum LBA */
4596         put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4597         /* Zone starting LBA granularity. */
4598         if (devip->zcap < devip->zsize)
4599                 put_unaligned_be64(devip->zsize, arr + 16);
4600
4601         rep_len = (unsigned long)desc - (unsigned long)arr;
4602         ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4603
4604 fini:
4605         sdeb_read_unlock(sip);
4606         kfree(arr);
4607         return ret;
4608 }
4609
4610 /* Logic transplanted from tcmu-runner, file_zbc.c */
4611 static void zbc_open_all(struct sdebug_dev_info *devip)
4612 {
4613         struct sdeb_zone_state *zsp = &devip->zstate[0];
4614         unsigned int i;
4615
4616         for (i = 0; i < devip->nr_zones; i++, zsp++) {
4617                 if (zsp->z_cond == ZC4_CLOSED)
4618                         zbc_open_zone(devip, &devip->zstate[i], true);
4619         }
4620 }
4621
/*
 * OPEN ZONE command handler. With the ALL bit set, explicitly opens
 * every closed zone (subject to the max_open resource limit);
 * otherwise opens the single zone whose start LBA is in the CDB.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;	/* ALL bit */
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		/* the CDB LBA must be a zone start LBA */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		/* conventional zones cannot be opened */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;	/* already explicitly open, or full: no-op */

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4689
4690 static void zbc_close_all(struct sdebug_dev_info *devip)
4691 {
4692         unsigned int i;
4693
4694         for (i = 0; i < devip->nr_zones; i++)
4695                 zbc_close_zone(devip, &devip->zstate[i]);
4696 }
4697
4698 static int resp_close_zone(struct scsi_cmnd *scp,
4699                            struct sdebug_dev_info *devip)
4700 {
4701         int res = 0;
4702         u64 z_id;
4703         u8 *cmd = scp->cmnd;
4704         struct sdeb_zone_state *zsp;
4705         bool all = cmd[14] & 0x01;
4706         struct sdeb_store_info *sip = devip2sip(devip, false);
4707
4708         if (!sdebug_dev_is_zoned(devip)) {
4709                 mk_sense_invalid_opcode(scp);
4710                 return check_condition_result;
4711         }
4712
4713         sdeb_write_lock(sip);
4714
4715         if (all) {
4716                 zbc_close_all(devip);
4717                 goto fini;
4718         }
4719
4720         /* Close specified zone */
4721         z_id = get_unaligned_be64(cmd + 2);
4722         if (z_id >= sdebug_capacity) {
4723                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4724                 res = check_condition_result;
4725                 goto fini;
4726         }
4727
4728         zsp = zbc_zone(devip, z_id);
4729         if (z_id != zsp->z_start) {
4730                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4731                 res = check_condition_result;
4732                 goto fini;
4733         }
4734         if (zbc_zone_is_conv(zsp)) {
4735                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4736                 res = check_condition_result;
4737                 goto fini;
4738         }
4739
4740         zbc_close_zone(devip, zsp);
4741 fini:
4742         sdeb_write_unlock(sip);
4743         return res;
4744 }
4745
4746 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4747                             struct sdeb_zone_state *zsp, bool empty)
4748 {
4749         enum sdebug_z_cond zc = zsp->z_cond;
4750
4751         if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4752             zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4753                 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4754                         zbc_close_zone(devip, zsp);
4755                 if (zsp->z_cond == ZC4_CLOSED)
4756                         devip->nr_closed--;
4757                 zsp->z_wp = zsp->z_start + zsp->z_size;
4758                 zsp->z_cond = ZC5_FULL;
4759         }
4760 }
4761
4762 static void zbc_finish_all(struct sdebug_dev_info *devip)
4763 {
4764         unsigned int i;
4765
4766         for (i = 0; i < devip->nr_zones; i++)
4767                 zbc_finish_zone(devip, &devip->zstate[i], false);
4768 }
4769
/*
 * FINISH ZONE command handler: transition zone(s) to the full
 * condition. With the ALL bit set, finishes all open/closed zones
 * (empty zones are skipped by zbc_finish_all); otherwise finishes the
 * single zone, even an empty one, whose start LBA is in the CDB.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;	/* ALL bit */
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		/* the CDB LBA must be a zone start LBA */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		/* conventional zones cannot be finished */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4817
/*
 * Return a single zone to the empty condition: close it if it is
 * open, zero the written part of its backing store and rewind the
 * write pointer to the zone start. No-op for non-sequential
 * (conventional/gap) zones.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	/* zbc_close_zone() may have left the zone in the closed state */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* discard any data written to the zone */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
4842
4843 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4844 {
4845         unsigned int i;
4846
4847         for (i = 0; i < devip->nr_zones; i++)
4848                 zbc_rwp_zone(devip, &devip->zstate[i]);
4849 }
4850
/*
 * RESET WRITE POINTER command handler. With the ALL bit set, resets
 * every sequential zone; otherwise resets the single zone whose start
 * LBA is in the CDB.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;	/* ALL bit */
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		/* the CDB LBA must be a zone start LBA */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		/* conventional zones have no write pointer to reset */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4896
4897 static u32 get_tag(struct scsi_cmnd *cmnd)
4898 {
4899         return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4900 }
4901
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
	unsigned long flags;
	struct scsi_cmnd *scp = sqcp->scmd;
	struct sdebug_scsi_cmd *sdsc;
	bool aborted;

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions running on a CPU other than the issuer */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	if (!scp) {
		pr_err("scmd=NULL\n");
		goto out;
	}

	/*
	 * Consume the aborted flag and detach the queued command from
	 * the scsi_cmnd under the per-command lock, so the decision to
	 * complete vs. abort is made atomically.
	 */
	sdsc = scsi_cmd_priv(scp);
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	ASSIGN_QUEUED_CMD(scp, NULL);

	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (aborted) {
		/* hand the command to the block layer timeout/EH path */
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		goto out;
	}

	scsi_done(scp); /* callback to mid level */
out:
	sdebug_free_queued_cmd(sqcp);
}
4941
4942 /* When high resolution timer goes off this function is called. */
4943 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4944 {
4945         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4946                                                   hrt);
4947         sdebug_q_cmd_complete(sd_dp);
4948         return HRTIMER_NORESTART;
4949 }
4950
4951 /* When work queue schedules work, it calls this function. */
4952 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4953 {
4954         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4955                                                   ew.work);
4956         sdebug_q_cmd_complete(sd_dp);
4957 }
4958
4959 static bool got_shared_uuid;
4960 static uuid_t shared_uuid;
4961
/*
 * Compute the zone layout (zone size/capacity, conventional, sequential
 * and gap zone counts) for a zoned device and allocate + initialize its
 * per-zone state array. Returns 0 on success or a negative errno.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t conv_capacity;
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	/*
	 * NOTE(review): this nr_zones value is a dead store; it is
	 * recomputed below from the conventional + sequential (+ gap)
	 * zone counts.
	 */
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_zone_cap_mb == 0) {
		devip->zcap = devip->zsize;	/* no gap zones */
	} else {
		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
			      ilog2(sdebug_sector_size);
		if (devip->zcap > devip->zsize) {
			pr_err("Zone capacity too large\n");
			return -EINVAL;
		}
	}

	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
	if (conv_capacity >= capacity) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;
	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
			      devip->zsize_shift;
	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

	/* Add gap zones if zone capacity is smaller than the zone size */
	if (devip->zcap < devip->zsize)
		devip->nr_zones += devip->nr_seq_zones;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/* Lay zones out back to back, starting with the conventional ones */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZTYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size =
				min_t(u64, devip->zsize, capacity - zstart);
		} else if ((zstart & (devip->zsize - 1)) == 0) {
			/* zone-size aligned start: a sequential zone */
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZTYPE_SWR;
			else
				zsp->z_type = ZBC_ZTYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
			zsp->z_size =
				min_t(u64, devip->zcap, capacity - zstart);
		} else {
			/* unaligned start: the gap after a short zone */
			zsp->z_type = ZBC_ZTYPE_GAP;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
					    capacity - zstart);
		}

		WARN_ON_ONCE((int)zsp->z_size <= 0);
		zstart += zsp->z_size;
	}

	return 0;
}
5073
5074 static struct sdebug_dev_info *sdebug_device_create(
5075                         struct sdebug_host_info *sdbg_host, gfp_t flags)
5076 {
5077         struct sdebug_dev_info *devip;
5078
5079         devip = kzalloc(sizeof(*devip), flags);
5080         if (devip) {
5081                 if (sdebug_uuid_ctl == 1)
5082                         uuid_gen(&devip->lu_name);
5083                 else if (sdebug_uuid_ctl == 2) {
5084                         if (got_shared_uuid)
5085                                 devip->lu_name = shared_uuid;
5086                         else {
5087                                 uuid_gen(&shared_uuid);
5088                                 got_shared_uuid = true;
5089                                 devip->lu_name = shared_uuid;
5090                         }
5091                 }
5092                 devip->sdbg_host = sdbg_host;
5093                 if (sdeb_zbc_in_use) {
5094                         devip->zmodel = sdeb_zbc_model;
5095                         if (sdebug_device_create_zones(devip)) {
5096                                 kfree(devip);
5097                                 return NULL;
5098                         }
5099                 } else {
5100                         devip->zmodel = BLK_ZONED_NONE;
5101                 }
5102                 devip->create_ts = ktime_get_boottime();
5103                 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5104                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5105         }
5106         return devip;
5107 }
5108
5109 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5110 {
5111         struct sdebug_host_info *sdbg_host;
5112         struct sdebug_dev_info *open_devip = NULL;
5113         struct sdebug_dev_info *devip;
5114
5115         sdbg_host = shost_to_sdebug_host(sdev->host);
5116
5117         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5118                 if ((devip->used) && (devip->channel == sdev->channel) &&
5119                     (devip->target == sdev->id) &&
5120                     (devip->lun == sdev->lun))
5121                         return devip;
5122                 else {
5123                         if ((!devip->used) && (!open_devip))
5124                                 open_devip = devip;
5125                 }
5126         }
5127         if (!open_devip) { /* try and make a new one */
5128                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5129                 if (!open_devip) {
5130                         pr_err("out of memory at line %d\n", __LINE__);
5131                         return NULL;
5132                 }
5133         }
5134
5135         open_devip->channel = sdev->channel;
5136         open_devip->target = sdev->id;
5137         open_devip->lun = sdev->lun;
5138         open_devip->sdbg_host = sdbg_host;
5139         set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5140         open_devip->used = true;
5141         return open_devip;
5142 }
5143
5144 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5145 {
5146         if (sdebug_verbose)
5147                 pr_info("slave_alloc <%u %u %u %llu>\n",
5148                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5149         return 0;
5150 }
5151
5152 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5153 {
5154         struct sdebug_dev_info *devip =
5155                         (struct sdebug_dev_info *)sdp->hostdata;
5156
5157         if (sdebug_verbose)
5158                 pr_info("slave_configure <%u %u %u %llu>\n",
5159                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5160         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5161                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5162         if (devip == NULL) {
5163                 devip = find_build_dev_info(sdp);
5164                 if (devip == NULL)
5165                         return 1;  /* no resources, will be marked offline */
5166         }
5167         sdp->hostdata = devip;
5168         if (sdebug_no_uld)
5169                 sdp->no_uld_attach = 1;
5170         config_cdb_len(sdp);
5171         return 0;
5172 }
5173
5174 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5175 {
5176         struct sdebug_dev_info *devip =
5177                 (struct sdebug_dev_info *)sdp->hostdata;
5178
5179         if (sdebug_verbose)
5180                 pr_info("slave_destroy <%u %u %u %llu>\n",
5181                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5182         if (devip) {
5183                 /* make this slot available for re-use */
5184                 devip->used = false;
5185                 sdp->hostdata = NULL;
5186         }
5187 }
5188
5189 /* Returns true if we require the queued memory to be freed by the caller. */
5190 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5191                            enum sdeb_defer_type defer_t)
5192 {
5193         if (defer_t == SDEB_DEFER_HRT) {
5194                 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5195
5196                 switch (res) {
5197                 case 0: /* Not active, it must have already run */
5198                 case -1: /* -1 It's executing the CB */
5199                         return false;
5200                 case 1: /* Was active, we've now cancelled */
5201                 default:
5202                         return true;
5203                 }
5204         } else if (defer_t == SDEB_DEFER_WQ) {
5205                 /* Cancel if pending */
5206                 if (cancel_work_sync(&sd_dp->ew.work))
5207                         return true;
5208                 /* Was not pending, so it must have run */
5209                 return false;
5210         } else if (defer_t == SDEB_DEFER_POLL) {
5211                 return true;
5212         }
5213
5214         return false;
5215 }
5216
5217
/*
 * Detach the queued-command state from @cmnd and stop its deferred
 * completion (hrtimer, workqueue item or poll entry).  Caller must hold
 * the per-command sdsc->lock.  Returns true if a queued command was
 * attached to @cmnd (whether or not its timer/work had already fired).
 */
static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
	enum sdeb_defer_type l_defer_t;
	struct sdebug_defer *sd_dp;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);

	lockdep_assert_held(&sdsc->lock);

	if (!sqcp)
		return false;
	sd_dp = &sqcp->sd_dp;
	/* Snapshot the deferral type before dropping our reference. */
	l_defer_t = READ_ONCE(sd_dp->defer_t);
	ASSIGN_QUEUED_CMD(cmnd, NULL);

	/*
	 * If the deferred work was cancelled (or was poll-based), the
	 * completion path will never run, so free the memory here.
	 */
	if (stop_qc_helper(sd_dp, l_defer_t))
		sdebug_free_queued_cmd(sqcp);

	return true;
}
5238
5239 /*
5240  * Called from scsi_debug_abort() only, which is for timed-out cmd.
5241  */
5242 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5243 {
5244         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5245         unsigned long flags;
5246         bool res;
5247
5248         spin_lock_irqsave(&sdsc->lock, flags);
5249         res = scsi_debug_stop_cmnd(cmnd);
5250         spin_unlock_irqrestore(&sdsc->lock, flags);
5251
5252         return res;
5253 }
5254
5255 /*
5256  * All we can do is set the cmnd as internally aborted and wait for it to
5257  * finish. We cannot call scsi_done() as normal completion path may do that.
5258  */
5259 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5260 {
5261         scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5262
5263         return true;
5264 }
5265
5266 /* Deletes (stops) timers or work queues of all queued commands */
5267 static void stop_all_queued(void)
5268 {
5269         struct sdebug_host_info *sdhp;
5270
5271         mutex_lock(&sdebug_host_list_mutex);
5272         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5273                 struct Scsi_Host *shost = sdhp->shost;
5274
5275                 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5276         }
5277         mutex_unlock(&sdebug_host_list_mutex);
5278 }
5279
5280 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5281 {
5282         bool ok = scsi_debug_abort_cmnd(SCpnt);
5283
5284         ++num_aborts;
5285
5286         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5287                 sdev_printk(KERN_INFO, SCpnt->device,
5288                             "%s: command%s found\n", __func__,
5289                             ok ? "" : " not");
5290
5291         return SUCCESS;
5292 }
5293
5294 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5295 {
5296         struct scsi_device *sdp = data;
5297         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5298
5299         if (scmd->device == sdp)
5300                 scsi_debug_abort_cmnd(scmd);
5301
5302         return true;
5303 }
5304
5305 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5306 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5307 {
5308         struct Scsi_Host *shost = sdp->host;
5309
5310         blk_mq_tagset_busy_iter(&shost->tag_set,
5311                                 scsi_debug_stop_all_queued_iter, sdp);
5312 }
5313
5314 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5315 {
5316         struct scsi_device *sdp = SCpnt->device;
5317         struct sdebug_dev_info *devip = sdp->hostdata;
5318
5319         ++num_dev_resets;
5320
5321         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5322                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5323
5324         scsi_debug_stop_all_queued(sdp);
5325         if (devip)
5326                 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5327
5328         return SUCCESS;
5329 }
5330
5331 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5332 {
5333         struct scsi_device *sdp = SCpnt->device;
5334         struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5335         struct sdebug_dev_info *devip;
5336         int k = 0;
5337
5338         ++num_target_resets;
5339         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5340                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5341
5342         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5343                 if (devip->target == sdp->id) {
5344                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5345                         ++k;
5346                 }
5347         }
5348
5349         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5350                 sdev_printk(KERN_INFO, sdp,
5351                             "%s: %d device(s) found in target\n", __func__, k);
5352
5353         return SUCCESS;
5354 }
5355
5356 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5357 {
5358         struct scsi_device *sdp = SCpnt->device;
5359         struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5360         struct sdebug_dev_info *devip;
5361         int k = 0;
5362
5363         ++num_bus_resets;
5364
5365         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5366                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5367
5368         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5369                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5370                 ++k;
5371         }
5372
5373         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5374                 sdev_printk(KERN_INFO, sdp,
5375                             "%s: %d device(s) found in host\n", __func__, k);
5376         return SUCCESS;
5377 }
5378
5379 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5380 {
5381         struct sdebug_host_info *sdbg_host;
5382         struct sdebug_dev_info *devip;
5383         int k = 0;
5384
5385         ++num_host_resets;
5386         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5387                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5388         mutex_lock(&sdebug_host_list_mutex);
5389         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5390                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5391                                     dev_list) {
5392                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5393                         ++k;
5394                 }
5395         }
5396         mutex_unlock(&sdebug_host_list_mutex);
5397         stop_all_queued();
5398         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5399                 sdev_printk(KERN_INFO, SCpnt->device,
5400                             "%s: %d device(s) found\n", __func__, k);
5401         return SUCCESS;
5402 }
5403
/*
 * Write an MSDOS (MBR) partition table describing sdebug_num_parts
 * roughly equal Linux partitions into the first sector of the ramdisk
 * image @ramp.  Only done when the simulated store is at least 1 MiB.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* first track (sdebug_sectors_per sectors) is reserved for the MBR */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/*
	 * Align each partition start down to a cylinder boundary; track the
	 * smallest gap so all partitions can be given one common size.
	 */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* derive legacy CHS fields from the LBA start sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* ... and from the LBA end sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5456
5457 static void block_unblock_all_queues(bool block)
5458 {
5459         struct sdebug_host_info *sdhp;
5460
5461         lockdep_assert_held(&sdebug_host_list_mutex);
5462
5463         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5464                 struct Scsi_Host *shost = sdhp->shost;
5465
5466                 if (block)
5467                         scsi_block_requests(shost);
5468                 else
5469                         scsi_unblock_requests(shost);
5470         }
5471 }
5472
5473 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5474  * commands will be processed normally before triggers occur.
5475  */
5476 static void tweak_cmnd_count(void)
5477 {
5478         int count, modulo;
5479
5480         modulo = abs(sdebug_every_nth);
5481         if (modulo < 2)
5482                 return;
5483
5484         mutex_lock(&sdebug_host_list_mutex);
5485         block_unblock_all_queues(true);
5486         count = atomic_read(&sdebug_cmnd_count);
5487         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5488         block_unblock_all_queues(false);
5489         mutex_unlock(&sdebug_host_list_mutex);
5490 }
5491
5492 static void clear_queue_stats(void)
5493 {
5494         atomic_set(&sdebug_cmnd_count, 0);
5495         atomic_set(&sdebug_completions, 0);
5496         atomic_set(&sdebug_miss_cpus, 0);
5497         atomic_set(&sdebug_a_tsf, 0);
5498 }
5499
5500 static bool inject_on_this_cmd(void)
5501 {
5502         if (sdebug_every_nth == 0)
5503                 return false;
5504         return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5505 }
5506
5507 #define INCLUSIVE_TIMING_MAX_NS 1000000         /* 1 millisecond */
5508
5509
5510 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5511 {
5512         if (sqcp)
5513                 kmem_cache_free(queued_cmd_cache, sqcp);
5514 }
5515
5516 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5517 {
5518         struct sdebug_queued_cmd *sqcp;
5519         struct sdebug_defer *sd_dp;
5520
5521         sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5522         if (!sqcp)
5523                 return NULL;
5524
5525         sd_dp = &sqcp->sd_dp;
5526
5527         hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5528         sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5529         INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5530
5531         sqcp->scmd = scmd;
5532
5533         return sqcp;
5534 }
5535
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	/* No per-LU state: fail the command immediately in this thread. */
	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delta_jiff == 0 requests immediate, non-deferred completion */
	if (delta_jiff == 0)
		goto respond_in_thread;


	/*
	 * SDEBUG_OPT_RARE_TSF: when the queue is full, occasionally inject
	 * a TASK SET FULL status instead of processing the command.
	 */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	sqcp = sdebug_alloc_queued_cmd(cmnd);
	if (!sqcp) {
		pr_err("%s no alloc\n", __func__);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	sd_dp = &sqcp->sd_dp;

	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function requested an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			/* one-shot injected transport error */
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		/* defer completion via hrtimer (or poll) */
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* sdebug_random: randomize the delay below its max */
			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					sdebug_free_queued_cmd(sqcp);
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* completion will be reaped by mq_poll at cmpl_ts */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			/* one-shot injected command abort */
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
5703
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* Keep this list sorted alphabetically by parameter name. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
/* zbc parameters configure the emulated zoned-block (ZBC) device model */
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5780
5781 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5782 MODULE_DESCRIPTION("SCSI debug adapter driver");
5783 MODULE_LICENSE("GPL");
5784 MODULE_VERSION(SDEBUG_VERSION);
5785
5786 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5787 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5788 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5789 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5790 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5791 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5792 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5793 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5794 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5795 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5796 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5797 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5798 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5799 MODULE_PARM_DESC(host_max_queue,
5800                  "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5801 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5802 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5803                  SDEBUG_VERSION "\")");
5804 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5805 MODULE_PARM_DESC(lbprz,
5806                  "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5807 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5808 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5809 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5810 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5811 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5812 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5813 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5814 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5815 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5816 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5817 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5818 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5819 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5820 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5821 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5822 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5823 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5824 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5825 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5826 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5827 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5828 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5829 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5830 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5831 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5832 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5833 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5834 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5835 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5836 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5837 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5838 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5839 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5840 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5841 MODULE_PARM_DESC(uuid_ctl,
5842                  "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5843 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5844 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5845 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5846 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5847 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5848 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5849 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5850 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5851 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5852
5853 #define SDEBUG_INFO_LEN 256
5854 static char sdebug_info[SDEBUG_INFO_LEN];
5855
5856 static const char *scsi_debug_info(struct Scsi_Host *shp)
5857 {
5858         int k;
5859
5860         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5861                       my_name, SDEBUG_VERSION, sdebug_version_date);
5862         if (k >= (SDEBUG_INFO_LEN - 1))
5863                 return sdebug_info;
5864         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5865                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5866                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
5867                   "statistics", (int)sdebug_statistics);
5868         return sdebug_info;
5869 }
5870
5871 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5872 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5873                                  int length)
5874 {
5875         char arr[16];
5876         int opts;
5877         int minLen = length > 15 ? 15 : length;
5878
5879         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5880                 return -EACCES;
5881         memcpy(arr, buffer, minLen);
5882         arr[minLen] = '\0';
5883         if (1 != sscanf(arr, "%d", &opts))
5884                 return -EINVAL;
5885         sdebug_opts = opts;
5886         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5887         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5888         if (sdebug_every_nth != 0)
5889                 tweak_cmnd_count();
5890         return length;
5891 }
5892
/*
 * Per-queue state handed to sdebug_submit_queue_iter() via
 * blk_mq_tagset_busy_iter(); records the range of busy tags seen on the
 * hardware queue selected by queue_num.
 */
struct sdebug_submit_queue_data {
	int *first;	/* in: -1; out: first (lowest) busy tag seen */
	int *last;	/* out: last (highest) busy tag seen */
	int queue_num;	/* hardware queue number to match against */
};
5898
5899 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
5900 {
5901         struct sdebug_submit_queue_data *data = opaque;
5902         u32 unique_tag = blk_mq_unique_tag(rq);
5903         u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
5904         u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
5905         int queue_num = data->queue_num;
5906
5907         if (hwq != queue_num)
5908                 return true;
5909
5910         /* Rely on iter'ing in ascending tag order */
5911         if (*data->first == -1)
5912                 *data->first = *data->last = tag;
5913         else
5914                 *data->last = tag;
5915
5916         return true;
5917 }
5918
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	/* Global settings and event counters shared by all hosts. */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* Per-hardware-queue busy-tag ranges, gathered via the tag set. */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;	/* store marked as not in use */
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		/* Map each host to its backing-store index ... */
		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		/* ... then list every store, flagging the idle ones. */
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
5998
/* Show the current response delay in jiffies. */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			struct sdebug_host_info *sdhp;

			/* Block all queues, then verify no host has any
			 * command in flight before switching the delay. */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* A jiffies delay replaces any ndelay. */
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
6039
/* Show the current response delay in nanoseconds. */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < 1 second (in nanoseconds) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			struct sdebug_host_info *sdhp;

			/* Block all queues, then verify no host has any
			 * command in flight before switching the delay. */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}

			if (res > 0) {
				/* Non-zero ndelay takes precedence over the
				 * jiffies delay; zero restores the default. */
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
6082
6083 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6084 {
6085         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6086 }
6087
6088 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6089                           size_t count)
6090 {
6091         int opts;
6092         char work[20];
6093
6094         if (sscanf(buf, "%10s", work) == 1) {
6095                 if (strncasecmp(work, "0x", 2) == 0) {
6096                         if (kstrtoint(work + 2, 16, &opts) == 0)
6097                                 goto opts_done;
6098                 } else {
6099                         if (kstrtoint(work, 10, &opts) == 0)
6100                                 goto opts_done;
6101                 }
6102         }
6103         return -EINVAL;
6104 opts_done:
6105         sdebug_opts = opts;
6106         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6107         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6108         tweak_cmnd_count();
6109         return count;
6110 }
6111 static DRIVER_ATTR_RW(opts);
6112
6113 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6114 {
6115         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6116 }
6117 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6118                            size_t count)
6119 {
6120         int n;
6121
6122         /* Cannot change from or to TYPE_ZBC with sysfs */
6123         if (sdebug_ptype == TYPE_ZBC)
6124                 return -EINVAL;
6125
6126         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6127                 if (n == TYPE_ZBC)
6128                         return -EINVAL;
6129                 sdebug_ptype = n;
6130                 return count;
6131         }
6132         return -EINVAL;
6133 }
6134 static DRIVER_ATTR_RW(ptype);
6135
6136 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6137 {
6138         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6139 }
6140 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6141                             size_t count)
6142 {
6143         int n;
6144
6145         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6146                 sdebug_dsense = n;
6147                 return count;
6148         }
6149         return -EINVAL;
6150 }
6151 static DRIVER_ATTR_RW(dsense);
6152
/* Show fake_rw: when set, READs/WRITEs succeed without touching a store. */
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * Toggle fake_rw. A 1 -> 0 transition (re-)attaches a backing store and
 * points every host at it; a 0 -> 1 transition releases all stores apart
 * from the first. Non-transitions are accepted but do nothing.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				/* no store yet: allocate a fresh one */
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* reclaim the first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6200
6201 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6202 {
6203         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6204 }
6205 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6206                               size_t count)
6207 {
6208         int n;
6209
6210         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6211                 sdebug_no_lun_0 = n;
6212                 return count;
6213         }
6214         return -EINVAL;
6215 }
6216 static DRIVER_ATTR_RW(no_lun_0);
6217
6218 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6219 {
6220         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6221 }
6222 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6223                               size_t count)
6224 {
6225         int n;
6226
6227         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6228                 sdebug_num_tgts = n;
6229                 sdebug_max_tgts_luns();
6230                 return count;
6231         }
6232         return -EINVAL;
6233 }
6234 static DRIVER_ATTR_RW(num_tgts);
6235
6236 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6237 {
6238         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6239 }
6240 static DRIVER_ATTR_RO(dev_size_mb);
6241
6242 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6243 {
6244         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6245 }
6246
6247 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6248                                     size_t count)
6249 {
6250         bool v;
6251
6252         if (kstrtobool(buf, &v))
6253                 return -EINVAL;
6254
6255         sdebug_per_host_store = v;
6256         return count;
6257 }
6258 static DRIVER_ATTR_RW(per_host_store);
6259
6260 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6261 {
6262         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6263 }
6264 static DRIVER_ATTR_RO(num_parts);
6265
6266 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6267 {
6268         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6269 }
6270 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6271                                size_t count)
6272 {
6273         int nth;
6274         char work[20];
6275
6276         if (sscanf(buf, "%10s", work) == 1) {
6277                 if (strncasecmp(work, "0x", 2) == 0) {
6278                         if (kstrtoint(work + 2, 16, &nth) == 0)
6279                                 goto every_nth_done;
6280                 } else {
6281                         if (kstrtoint(work, 10, &nth) == 0)
6282                                 goto every_nth_done;
6283                 }
6284         }
6285         return -EINVAL;
6286
6287 every_nth_done:
6288         sdebug_every_nth = nth;
6289         if (nth && !sdebug_statistics) {
6290                 pr_info("every_nth needs statistics=1, set it\n");
6291                 sdebug_statistics = true;
6292         }
6293         tweak_cmnd_count();
6294         return count;
6295 }
6296 static DRIVER_ATTR_RW(every_nth);
6297
/* Show the LUN address method: 0 -> peripheral, 1 -> flat. */
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
/*
 * Set the LUN address method. On an actual change, and when the simulated
 * SCSI level is at least SPC-3, raise a LUNS_CHANGED unit attention on
 * every device so initiators notice the new addressing.
 */
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
6334
/* Show the number of LUNs simulated per target. */
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/*
 * Set max_luns (capped at 256) and rebuild the topology. On an actual
 * change, and when the simulated SCSI level is at least SPC-3, raise a
 * LUNS_CHANGED unit attention on every device.
 */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6373
6374 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6375 {
6376         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6377 }
6378 /* N.B. max_queue can be changed while there are queued commands. In flight
6379  * commands beyond the new max_queue will be completed. */
6380 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6381                                size_t count)
6382 {
6383         int n;
6384
6385         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6386             (n <= SDEBUG_CANQUEUE) &&
6387             (sdebug_host_max_queue == 0)) {
6388                 mutex_lock(&sdebug_host_list_mutex);
6389
6390                 /* We may only change sdebug_max_queue when we have no shosts */
6391                 if (list_empty(&sdebug_host_list))
6392                         sdebug_max_queue = n;
6393                 else
6394                         count = -EBUSY;
6395                 mutex_unlock(&sdebug_host_list_mutex);
6396                 return count;
6397         }
6398         return -EINVAL;
6399 }
6400 static DRIVER_ATTR_RW(max_queue);
6401
6402 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6403 {
6404         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6405 }
6406
6407 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6408 {
6409         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6410 }
6411
6412 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6413 {
6414         bool v;
6415
6416         if (kstrtobool(buf, &v))
6417                 return -EINVAL;
6418
6419         sdebug_no_rwlock = v;
6420         return count;
6421 }
6422 static DRIVER_ATTR_RW(no_rwlock);
6423
/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it after hosts exist,
 * hence the attribute is read-only.
 */
static DRIVER_ATTR_RO(host_max_queue);
6429
6430 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6431 {
6432         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6433 }
6434 static DRIVER_ATTR_RO(no_uld);
6435
6436 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6437 {
6438         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6439 }
6440 static DRIVER_ATTR_RO(scsi_level);
6441
/* Show virtual_gb: reported capacity in GiB (0 -> use dev_size_mb). */
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/*
 * Set virtual_gb and recompute capacity. On an actual change, raise a
 * CAPACITY_CHANGED unit attention on every device so initiators re-read
 * the capacity.
 */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
6480
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Add (positive value) or remove (negative value) that many simulated
 * hosts. With per-host stores in effect, a store marked not-in-use is
 * re-used for a new host when available; otherwise a fresh one is made.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	/* private stores only matter when reads/writes are not faked */
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* scan for an idle store to re-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6524
6525 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6526 {
6527         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6528 }
6529 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6530                                     size_t count)
6531 {
6532         int n;
6533
6534         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6535                 sdebug_vpd_use_hostno = n;
6536                 return count;
6537         }
6538         return -EINVAL;
6539 }
6540 static DRIVER_ATTR_RW(vpd_use_hostno);
6541
6542 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6543 {
6544         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6545 }
6546 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6547                                 size_t count)
6548 {
6549         int n;
6550
6551         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6552                 if (n > 0)
6553                         sdebug_statistics = true;
6554                 else {
6555                         clear_queue_stats();
6556                         sdebug_statistics = false;
6557                 }
6558                 return count;
6559         }
6560         return -EINVAL;
6561 }
6562 static DRIVER_ATTR_RW(statistics);
6563
6564 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6565 {
6566         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6567 }
6568 static DRIVER_ATTR_RO(sector_size);
6569
6570 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6571 {
6572         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6573 }
6574 static DRIVER_ATTR_RO(submit_queues);
6575
6576 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6577 {
6578         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6579 }
6580 static DRIVER_ATTR_RO(dix);
6581
6582 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6583 {
6584         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6585 }
6586 static DRIVER_ATTR_RO(dif);
6587
6588 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6589 {
6590         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6591 }
6592 static DRIVER_ATTR_RO(guard);
6593
6594 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6595 {
6596         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6597 }
6598 static DRIVER_ATTR_RO(ato);
6599
/* Show the provisioning map of store 0 as a bitmap range list (e.g. "0-1") */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	/* without logical block provisioning, report everything as mapped */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		/* %*pbl formats the bitmap as a comma separated range list;
		 * PAGE_SIZE - 1 leaves room for the '\n' appended below */
		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
6620 static DRIVER_ATTR_RO(map);
6621
6622 static ssize_t random_show(struct device_driver *ddp, char *buf)
6623 {
6624         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6625 }
6626
6627 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6628                             size_t count)
6629 {
6630         bool v;
6631
6632         if (kstrtobool(buf, &v))
6633                 return -EINVAL;
6634
6635         sdebug_random = v;
6636         return count;
6637 }
6638 static DRIVER_ATTR_RW(random);
6639
6640 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6641 {
6642         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6643 }
6644 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6645                                size_t count)
6646 {
6647         int n;
6648
6649         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6650                 sdebug_removable = (n > 0);
6651                 return count;
6652         }
6653         return -EINVAL;
6654 }
6655 static DRIVER_ATTR_RW(removable);
6656
6657 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6658 {
6659         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6660 }
6661 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6662 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6663                                size_t count)
6664 {
6665         int n;
6666
6667         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6668                 sdebug_host_lock = (n > 0);
6669                 return count;
6670         }
6671         return -EINVAL;
6672 }
6673 static DRIVER_ATTR_RW(host_lock);
6674
6675 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6676 {
6677         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6678 }
6679 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6680                             size_t count)
6681 {
6682         int n;
6683
6684         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6685                 sdebug_strict = (n > 0);
6686                 return count;
6687         }
6688         return -EINVAL;
6689 }
6690 static DRIVER_ATTR_RW(strict);
6691
6692 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6693 {
6694         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6695 }
6696 static DRIVER_ATTR_RO(uuid_ctl);
6697
6698 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6699 {
6700         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6701 }
6702 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6703                              size_t count)
6704 {
6705         int ret, n;
6706
6707         ret = kstrtoint(buf, 0, &n);
6708         if (ret)
6709                 return ret;
6710         sdebug_cdb_len = n;
6711         all_config_cdb_len();
6712         return count;
6713 }
6714 static DRIVER_ATTR_RW(cdb_len);
6715
/*
 * Accepted spellings for the zbc= module parameter; the three tables are
 * alternative aliases for the same BLK_ZONED_* models (see
 * sdeb_zbc_model_str() below, which tries them in order a, b, c).
 */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6733
6734 static int sdeb_zbc_model_str(const char *cp)
6735 {
6736         int res = sysfs_match_string(zbc_model_strs_a, cp);
6737
6738         if (res < 0) {
6739                 res = sysfs_match_string(zbc_model_strs_b, cp);
6740                 if (res < 0) {
6741                         res = sysfs_match_string(zbc_model_strs_c, cp);
6742                         if (res < 0)
6743                                 return -EINVAL;
6744                 }
6745         }
6746         return res;
6747 }
6748
6749 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6750 {
6751         return scnprintf(buf, PAGE_SIZE, "%s\n",
6752                          zbc_model_strs_a[sdeb_zbc_model]);
6753 }
6754 static DRIVER_ATTR_RO(zbc);
6755
6756 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6757 {
6758         return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6759 }
6760 static DRIVER_ATTR_RO(tur_ms_to_ready);
6761
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,	/* sentinel terminating the attribute list */
};
ATTRIBUTE_GROUPS(sdebug_drv);
6811
6812 static struct device *pseudo_primary;
6813
/*
 * Module entry point: validate module parameters, size the simulated
 * medium, create the default store (unless fake_rw), register the pseudo
 * root device / bus / driver, then add the requested number of hosts.
 * Returns 0 on success else a negated errno.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;

	/* Parameter validation; nothing is allocated yet so plain returns
	 * are safe up to the xa_init_flags() call below. */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only power-of-two sector sizes from 512 to 4096 are supported */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* more than 256 LUNs forces the flat LUN addressing method */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	/* create the default backing store unless fake_rw suppresses it */
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
	if (!queued_cmd_cache) {
		ret = -ENOMEM;
		goto driver_unreg;
	}

	/* first host uses the already-created store; further hosts either
	 * share it or get their own, depending on per_host_store */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

	/* error unwind: reverse order of the registrations above */
driver_unreg:
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}
7054
/* Module exit: tear everything down in reverse order of scsi_debug_init(). */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	/* remove all hosts first so nothing references the stores below */
	for (; k; k--)
		sdebug_do_remove_host(true);
	kmem_cache_destroy(queued_cmd_cache);
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	/* false => erase every store, including the first one */
	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
}
7069
7070 device_initcall(scsi_debug_init);
7071 module_exit(scsi_debug_exit);
7072
/* Release callback for a pseudo adapter: free its sdebug_host_info. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
7080
/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	/* release provisioning map, PI buffer and backing ramdisk (any of
	 * which may be NULL; vfree(NULL) is a no-op) */
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	/* drop the xarray slot, then the store descriptor itself */
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
7099
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;	/* skip first entry */
		else
			sdebug_erase_store(idx, sip);
	}
	/*
	 * NOTE(review): apart_from_first is cleared inside the loop, so this
	 * branch can only be taken when the xarray was empty on entry —
	 * confirm that resetting sdeb_most_recent_idx here is intended.
	 */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
7115
/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	/* NOTE(review): xa_limit bounds are inclusive, so index 65536 is
	 * also permitted — confirm against the 65536 limit stated above */
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* reserve an index under the xarray lock before allocating buffers */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	/* backing ramdisk holding the simulated medium's user data */
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	/* frees sip, any buffers it holds, and releases the xarray slot */
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
7198
/*
 * Create one pseudo adapter, populate its device list and register it with
 * the driver core.  per_host_idx selects the backing store index; a
 * negative value means use the first store.  Returns 0 on success else a
 * negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* claim the store for this host if it was marked free for re-use */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		/* unlink before the put_device() below drops the memory */
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/* once a release callback is set, the device core owns the memory,
	 * so drop the reference instead of freeing directly */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
7257
7258 static int sdebug_do_add_host(bool mk_new_store)
7259 {
7260         int ph_idx = sdeb_most_recent_idx;
7261
7262         if (mk_new_store) {
7263                 ph_idx = sdebug_add_store();
7264                 if (ph_idx < 0)
7265                         return ph_idx;
7266         }
7267         return sdebug_add_host_helper(ph_idx);
7268 }
7269
/*
 * Remove the most recently added pseudo host.  the_end is true only during
 * module exit; otherwise a store left without users is kept around and
 * marked free for later re-use.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* pick the tail of the list: the last host added */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is this host the only user of store idx? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			/* keep the store but mark it free for re-use */
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	/* unregister outside the mutex; triggers sdebug_release_adapter() */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7309
7310 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7311 {
7312         struct sdebug_dev_info *devip = sdev->hostdata;
7313
7314         if (!devip)
7315                 return  -ENODEV;
7316
7317         mutex_lock(&sdebug_host_list_mutex);
7318         block_unblock_all_queues(true);
7319
7320         if (qdepth > SDEBUG_CANQUEUE) {
7321                 qdepth = SDEBUG_CANQUEUE;
7322                 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7323                         qdepth, SDEBUG_CANQUEUE);
7324         }
7325         if (qdepth < 1)
7326                 qdepth = 1;
7327         if (qdepth != sdev->queue_depth)
7328                 scsi_change_queue_depth(sdev, qdepth);
7329
7330         block_unblock_all_queues(false);
7331         mutex_unlock(&sdebug_host_list_mutex);
7332
7333         if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7334                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7335
7336         return sdev->queue_depth;
7337 }
7338
7339 static bool fake_timeout(struct scsi_cmnd *scp)
7340 {
7341         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7342                 if (sdebug_every_nth < -1)
7343                         sdebug_every_nth = -1;
7344                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7345                         return true; /* ignore command causing timeout */
7346                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7347                          scsi_medium_access_command(scp))
7348                         return true; /* time out reads and writes */
7349         }
7350         return false;
7351 }
7352
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;	/* ns elapsed since device creation (stays 0 if clock not past create_ts) */
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		/*
		 * State 2: device is "in process of becoming ready".
		 * It auto-transitions to ready once sdeb_tur_ms_to_ready
		 * milliseconds have elapsed since the device was created.
		 */
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/*
			 * Report the remaining time-to-ready in the sense
			 * INFORMATION field; if elapsed time is unknown
			 * (diff_ns left at 0 above, or clock anomaly),
			 * report the full interval.
			 */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* State 1 (explicitly stopped): a START STOP UNIT is required. */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7395
7396 static void sdebug_map_queues(struct Scsi_Host *shost)
7397 {
7398         int i, qoff;
7399
7400         if (shost->nr_hw_queues == 1)
7401                 return;
7402
7403         for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7404                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7405
7406                 map->nr_queues  = 0;
7407
7408                 if (i == HCTX_TYPE_DEFAULT)
7409                         map->nr_queues = submit_queues - poll_queues;
7410                 else if (i == HCTX_TYPE_POLL)
7411                         map->nr_queues = poll_queues;
7412
7413                 if (!map->nr_queues) {
7414                         BUG_ON(i == HCTX_TYPE_DEFAULT);
7415                         continue;
7416                 }
7417
7418                 map->queue_offset = qoff;
7419                 blk_mq_map_queues(map);
7420
7421                 qoff += map->nr_queues;
7422         }
7423 }
7424
/* Per-call context threaded through sdebug_blk_mq_poll()'s tag iteration. */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hw queue being polled; others are skipped */
	int *num_entries;	/* incremented once per completed command */
};
7429
7430 /*
7431  * We don't handle aborted commands here, but it does not seem possible to have
7432  * aborted polled commands from schedule_resp()
7433  */
7434 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7435 {
7436         struct sdebug_blk_mq_poll_data *data = opaque;
7437         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7438         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7439         struct sdebug_defer *sd_dp;
7440         u32 unique_tag = blk_mq_unique_tag(rq);
7441         u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7442         struct sdebug_queued_cmd *sqcp;
7443         unsigned long flags;
7444         int queue_num = data->queue_num;
7445         ktime_t time;
7446
7447         /* We're only interested in one queue for this iteration */
7448         if (hwq != queue_num)
7449                 return true;
7450
7451         /* Subsequent checks would fail if this failed, but check anyway */
7452         if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7453                 return true;
7454
7455         time = ktime_get_boottime();
7456
7457         spin_lock_irqsave(&sdsc->lock, flags);
7458         sqcp = TO_QUEUED_CMD(cmd);
7459         if (!sqcp) {
7460                 spin_unlock_irqrestore(&sdsc->lock, flags);
7461                 return true;
7462         }
7463
7464         sd_dp = &sqcp->sd_dp;
7465         if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7466                 spin_unlock_irqrestore(&sdsc->lock, flags);
7467                 return true;
7468         }
7469
7470         if (time < sd_dp->cmpl_ts) {
7471                 spin_unlock_irqrestore(&sdsc->lock, flags);
7472                 return true;
7473         }
7474
7475         ASSIGN_QUEUED_CMD(cmd, NULL);
7476         spin_unlock_irqrestore(&sdsc->lock, flags);
7477
7478         if (sdebug_statistics) {
7479                 atomic_inc(&sdebug_completions);
7480                 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7481                         atomic_inc(&sdebug_miss_cpus);
7482         }
7483
7484         sdebug_free_queued_cmd(sqcp);
7485
7486         scsi_done(cmd); /* callback to mid level */
7487         (*data->num_entries)++;
7488         return true;
7489 }
7490
7491 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7492 {
7493         int num_entries = 0;
7494         struct sdebug_blk_mq_poll_data data = {
7495                 .queue_num = queue_num,
7496                 .num_entries = &num_entries,
7497         };
7498
7499         blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7500                                 &data);
7501
7502         if (num_entries > 0)
7503                 atomic_add(num_entries, &sdeb_mq_poll_count);
7504         return num_entries;
7505 }
7506
/*
 * queuecommand() entry point: decode the CDB, look up the matching
 * opcode_info_t entry (resolving service actions where present), run the
 * generic gate checks (LUN range, wlun restrictions, strict CDB-mask,
 * pending unit attentions, stopped state, fault injection), then hand the
 * per-opcode response function to schedule_resp() with the configured
 * delay. Returns 0 normally or SCSI_MLQUEUE_HOST_BUSY when busy is
 * being injected.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	/* Fault injection needs the command counter, which is only
	 * maintained when statistics are enabled. */
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* Verbose mode: dump the CDB bytes (up to 32) for each command. */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	/* LUNs past the configured maximum get DID_NO_CONNECT, except the
	 * well-known REPORT LUNS wlun. */
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* Service action lives either in cmd[1] bits 4:0
			 * (F_SA_LOW) or in cmd[8..9] (high form). */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			/* No attached entry matched: flag the SA field or
			 * the opcode itself, whichever applies. */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		/* len_mask[0] is the CDB length; each later byte masks the
		 * bits allowed in the corresponding CDB byte. */
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* Find highest disallowed bit for the sense
				 * field pointer. */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* Report any pending unit attention unless the opcode skips UAs. */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* skip data handling, respond with success */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7673
7674 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
7675 {
7676         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7677
7678         spin_lock_init(&sdsc->lock);
7679
7680         return 0;
7681 }
7682
7683
/* Host template for the simulated adapter; some fields (can_queue,
 * cmd_per_lun, dma_boundary) are overridden in sdebug_driver_probe(). */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,	/* simulated initiator ID */
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no transfer-size limit */
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
};
7714
/*
 * Pseudo-bus probe: allocate and configure a Scsi_Host for one simulated
 * adapter — queue counts (submit/poll), target/LUN limits, DIF/DIX
 * protection capabilities and guard type — then register it with the SCSI
 * midlayer and scan it. Returns 0 on success or -ENODEV on failure.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	/* Module parameter sanity: more submit queues than CPUs is useless. */
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			 my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;	/* DEFAULT, READ (unused) and POLL maps */

	sdbg_host->shost = hpnt;
	/* Leave room for the host's own target ID (this_id) when possible. */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* Translate the dif/dix module parameters into SHOST_* capability
	 * bits for scsi_host_set_prot(). */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
7839
7840 static void sdebug_driver_remove(struct device *dev)
7841 {
7842         struct sdebug_host_info *sdbg_host;
7843         struct sdebug_dev_info *sdbg_devinfo, *tmp;
7844
7845         sdbg_host = dev_to_sdebug_host(dev);
7846
7847         scsi_remove_host(sdbg_host->shost);
7848
7849         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7850                                  dev_list) {
7851                 list_del(&sdbg_devinfo->dev_list);
7852                 kfree(sdbg_devinfo->zstate);
7853                 kfree(sdbg_devinfo);
7854         }
7855
7856         scsi_host_put(sdbg_host->shost);
7857 }
7858
/* Pseudo bus that hosts the simulated adapters; add_host/remove_host
 * create and destroy devices on it, triggering probe/remove below. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};