1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
45 #include <net/checksum.h>
47 #include <asm/unaligned.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
65 #define MY_NAME "scsi_debug"
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST 1
109 #define DEF_NUM_TGTS 1
110 #define DEF_MAX_LUNS 1
111 /* With these defaults, this driver will make 1 host with 1 target
112 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT 0
118 #define DEF_DEV_SIZE_MB 8
119 #define DEF_ZBC_DEV_SIZE_MB 128
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE 0
124 #define DEF_EVERY_NTH 0
125 #define DEF_FAKE_RW 0
127 #define DEF_HOST_LOCK 0
130 #define DEF_LBPWS10 0
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0 0
135 #define DEF_NUM_PARTS 0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB 0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB 128
161 #define DEF_ZBC_MAX_OPEN_ZONES 8
162 #define DEF_ZBC_NR_CONV_ZONES 1
164 #define SDEBUG_LUN_0_VAL 0
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE 1
168 #define SDEBUG_OPT_MEDIUM_ERR 2
169 #define SDEBUG_OPT_TIMEOUT 4
170 #define SDEBUG_OPT_RECOVERED_ERR 8
171 #define SDEBUG_OPT_TRANSPORT_ERR 16
172 #define SDEBUG_OPT_DIF_ERR 32
173 #define SDEBUG_OPT_DIX_ERR 64
174 #define SDEBUG_OPT_MAC_TIMEOUT 128
175 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
176 #define SDEBUG_OPT_Q_NOISE 0x200
177 #define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
178 #define SDEBUG_OPT_RARE_TSF 0x800
179 #define SDEBUG_OPT_N_WCE 0x1000
180 #define SDEBUG_OPT_RESET_NOISE 0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
182 #define SDEBUG_OPT_HOST_BUSY 0x8000
183 #define SDEBUG_OPT_CMD_ABORT 0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 SDEBUG_OPT_TRANSPORT_ERR | \
188 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 SDEBUG_OPT_SHORT_TRANSFER | \
190 SDEBUG_OPT_HOST_BUSY | \
191 SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196 * priority order. In the subset implemented here lower numbers have higher
197 * priority. The UA numbers should be a sequence starting from 0 with
198 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210 * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219 * but cannot exceed SDEBUG_CANQUEUE .
221 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN 1 /* Data-in command (e.g. READ) */
227 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
230 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
233 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
236 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
238 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
247 #define SDEBUG_MAX_PARTS 4
249 #define SDEBUG_MAX_CMD_LEN 32
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
253 /* Zone types (zbcr05 table 25) */
258 /* ZBC_ZTYPE_SOBR = 0x4, */
262 /* enumeration names taken from table 26, zbcr05 */
264 ZBC_NOT_WRITE_POINTER = 0x0,
266 ZC2_IMPLICIT_OPEN = 0x2,
267 ZC3_EXPLICIT_OPEN = 0x3,
274 struct sdeb_zone_state { /* ZBC: per zone state */
275 enum sdebug_z_type z_type;
276 enum sdebug_z_cond z_cond;
277 bool z_non_seq_resource;
283 struct sdebug_dev_info {
284 struct list_head dev_list;
285 unsigned int channel;
289 struct sdebug_host_info *sdbg_host;
290 unsigned long uas_bm[1];
292 atomic_t stopped; /* 1: by SSU, 2: device start */
295 /* For ZBC devices */
296 enum blk_zoned_model zmodel;
299 unsigned int zsize_shift;
300 unsigned int nr_zones;
301 unsigned int nr_conv_zones;
302 unsigned int nr_seq_zones;
303 unsigned int nr_imp_open;
304 unsigned int nr_exp_open;
305 unsigned int nr_closed;
306 unsigned int max_open;
307 ktime_t create_ts; /* time since bootup that this device was created */
308 struct sdeb_zone_state *zstate;
311 struct sdebug_host_info {
312 struct list_head host_list;
313 int si_idx; /* sdeb_store_info (per host) xarray index */
314 struct Scsi_Host *shost;
316 struct list_head dev_info_list;
319 /* There is an xarray of pointers to this struct's objects, one per host */
320 struct sdeb_store_info {
321 rwlock_t macc_lck; /* for atomic media access on this store */
322 u8 *storep; /* user data storage (ram) */
323 struct t10_pi_tuple *dif_storep; /* protection info */
324 void *map_storep; /* provisioning map */
327 #define to_sdebug_host(d) \
328 container_of(d, struct sdebug_host_info, dev)
/* How completion of a queued command is deferred (or not deferred) */
enum sdeb_defer_type {
	SDEB_DEFER_NONE = 0,	/* response completed inline, no deferral */
	SDEB_DEFER_HRT = 1,	/* deferred via a high-resolution timer */
	SDEB_DEFER_WQ = 2,	/* deferred via a workqueue item (see struct execute_work) */
	SDEB_DEFER_POLL = 3,	/* completed from the mq_poll path */
};
333 struct sdebug_defer {
335 struct execute_work ew;
336 ktime_t cmpl_ts;/* time since boot to complete this cmd */
337 int sqa_idx; /* index of sdebug_queue array */
338 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
339 int hc_idx; /* hostwide tag index */
344 bool aborted; /* true when blk_abort_request() already called */
345 enum sdeb_defer_type defer_t;
348 struct sdebug_queued_cmd {
349 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
350 * instance indicates this slot is in use.
352 struct sdebug_defer *sd_dp;
353 struct scsi_cmnd *a_cmnd;
356 struct sdebug_queue {
357 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
358 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
360 atomic_t blocked; /* to temporarily stop more being queued */
/*
 * Driver-wide counters; atomic_t because they are bumped from multiple
 * submission/completion contexts concurrently.
 */
363 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
364 static atomic_t sdebug_completions; /* count of deferred completions */
365 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
366 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
367 static atomic_t sdeb_inject_pending; /* NOTE(review): presumably non-zero while an error injection is queued -- confirm at its users */
368 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
370 struct opcode_info_t {
371 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
372 /* for terminating element */
373 u8 opcode; /* if num_attached > 0, preferred */
374 u16 sa; /* service action */
375 u32 flags; /* OR-ed set of SDEB_F_* */
376 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
377 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
378 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
379 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
382 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
383 enum sdeb_opcode_index {
384 SDEB_I_INVALID_OPCODE = 0,
386 SDEB_I_REPORT_LUNS = 2,
387 SDEB_I_REQUEST_SENSE = 3,
388 SDEB_I_TEST_UNIT_READY = 4,
389 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
390 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
391 SDEB_I_LOG_SENSE = 7,
392 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
393 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
394 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
395 SDEB_I_START_STOP = 11,
396 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
397 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
398 SDEB_I_MAINT_IN = 14,
399 SDEB_I_MAINT_OUT = 15,
400 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
401 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
402 SDEB_I_RESERVE = 18, /* 6, 10 */
403 SDEB_I_RELEASE = 19, /* 6, 10 */
404 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
405 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
406 SDEB_I_ATA_PT = 22, /* 12, 16 */
407 SDEB_I_SEND_DIAG = 23,
409 SDEB_I_WRITE_BUFFER = 25,
410 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
411 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
412 SDEB_I_COMP_WRITE = 28,
413 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
414 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
415 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
416 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
420 static const unsigned char opcode_ind_arr[256] = {
421 /* 0x0; 0x0->0x1f: 6 byte cdbs */
422 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
424 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
425 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
427 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
428 SDEB_I_ALLOW_REMOVAL, 0,
429 /* 0x20; 0x20->0x3f: 10 byte cdbs */
430 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
431 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
432 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
433 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
434 /* 0x40; 0x40->0x5f: 10 byte cdbs */
435 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
436 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
437 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
439 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
440 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
441 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
442 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
443 0, SDEB_I_VARIABLE_LEN,
444 /* 0x80; 0x80->0x9f: 16 byte cdbs */
445 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
446 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
447 0, 0, 0, SDEB_I_VERIFY,
448 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
449 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
450 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
451 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
452 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
453 SDEB_I_MAINT_OUT, 0, 0, 0,
454 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
455 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
456 0, 0, 0, 0, 0, 0, 0, 0,
457 0, 0, 0, 0, 0, 0, 0, 0,
458 /* 0xc0; 0xc0->0xff: vendor specific */
459 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
466 * The following "response" functions return the SCSI mid-level's 4 byte
467 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
468 * command completion, they can mask their return value with
469 * SDEG_RES_IMMED_MASK .
471 #define SDEG_RES_IMMED_MASK 0x40000000
473 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int sdebug_do_add_host(bool mk_new_store);
504 static int sdebug_add_host_helper(int per_host_idx);
505 static void sdebug_do_remove_host(bool the_end);
506 static int sdebug_add_store(void);
507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
508 static void sdebug_erase_all_stores(bool apart_from_first);
511 * The following are overflow arrays for cdbs that "hit" the same index in
512 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513 * should be placed in opcode_info_arr[], the others should be placed here.
515 static const struct opcode_info_t msense_iarr[] = {
516 {0, 0x1a, 0, F_D_IN, NULL, NULL,
517 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
520 static const struct opcode_info_t mselect_iarr[] = {
521 {0, 0x15, 0, F_D_OUT, NULL, NULL,
522 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 static const struct opcode_info_t read_iarr[] = {
526 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
529 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
536 static const struct opcode_info_t write_iarr[] = {
537 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
538 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
540 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
541 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
543 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
544 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 0xbf, 0xc7, 0, 0, 0, 0} },
548 static const struct opcode_info_t verify_iarr[] = {
549 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
560 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
561 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
564 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
569 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
570 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
578 static const struct opcode_info_t write_same_iarr[] = {
579 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
584 static const struct opcode_info_t reserve_iarr[] = {
585 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
586 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
589 static const struct opcode_info_t release_iarr[] = {
590 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
591 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
606 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
607 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
610 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
613 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
618 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
619 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626 * plus the terminating elements for logic that scans this table such as
627 * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
630 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
631 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 0, 0} }, /* REPORT LUNS */
637 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
643 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
644 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
646 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
647 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
649 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
651 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
652 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
654 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
656 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
658 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 resp_write_dt0, write_iarr, /* WRITE(16) */
660 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
671 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
673 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 0xff, 0, 0xc7, 0, 0, 0, 0} },
676 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
680 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
684 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
686 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
688 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
690 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 NULL, release_iarr, /* RELEASE(10) <no response function> */
692 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
695 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
702 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
706 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 0, 0, 0, 0} }, /* WRITE_BUFFER */
709 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
711 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
713 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 resp_sync_cache, sync_cache_iarr,
715 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
717 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
720 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 resp_pre_fetch, pre_fetch_iarr,
722 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 0, 0, 0, 0} }, /* PRE-FETCH (10) */
726 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
728 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
732 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
735 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
736 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/*
 * Driver parameters with their compiled-in defaults (DEF_*).
 * NOTE(review): these appear to be module parameters settable at load
 * time (and some via sysfs) -- confirm against the module_param()
 * declarations later in the file.
 */
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue; /* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
758 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
759 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
760 static int sdebug_no_uld;
761 static int sdebug_num_parts = DEF_NUM_PARTS;
762 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
763 static int sdebug_opt_blks = DEF_OPT_BLKS;
764 static int sdebug_opts = DEF_OPTS;
765 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
766 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
767 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
768 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
769 static int sdebug_sector_size = DEF_SECTOR_SIZE;
770 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
771 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
772 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
773 static unsigned int sdebug_lbpu = DEF_LBPU;
774 static unsigned int sdebug_lbpws = DEF_LBPWS;
775 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
776 static unsigned int sdebug_lbprz = DEF_LBPRZ;
777 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
778 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
779 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
780 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
781 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
782 static int sdebug_uuid_ctl = DEF_UUID_CTL;
783 static bool sdebug_random = DEF_RANDOM;
784 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
785 static bool sdebug_removable = DEF_REMOVABLE;
786 static bool sdebug_clustering;
787 static bool sdebug_host_lock = DEF_HOST_LOCK;
788 static bool sdebug_strict = DEF_STRICT;
789 static bool sdebug_any_injecting_opt;
790 static bool sdebug_no_rwlock;
791 static bool sdebug_verbose;
792 static bool have_dif_prot;
793 static bool write_since_sync;
794 static bool sdebug_statistics = DEF_STATISTICS;
795 static bool sdebug_wp;
796 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
797 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
798 static char *sdeb_zbc_model_s;
800 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
801 SAM_LUN_AM_FLAT = 0x1,
802 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
803 SAM_LUN_AM_EXTENDED = 0x3};
804 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
805 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
807 static unsigned int sdebug_store_sectors;
808 static sector_t sdebug_capacity; /* in sectors */
810 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
811 may still need them */
812 static int sdebug_heads; /* heads per disk */
813 static int sdebug_cylinders_per; /* cylinders per surface */
814 static int sdebug_sectors_per; /* sectors per cylinder */
816 static LIST_HEAD(sdebug_host_list);
817 static DEFINE_SPINLOCK(sdebug_host_list_lock);
819 static struct xarray per_store_arr;
820 static struct xarray *per_store_ap = &per_store_arr;
821 static int sdeb_first_idx = -1; /* invalid index ==> none created */
822 static int sdeb_most_recent_idx = -1;
823 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
825 static unsigned long map_size;
826 static int num_aborts;
827 static int num_dev_resets;
828 static int num_target_resets;
829 static int num_bus_resets;
830 static int num_host_resets;
831 static int dix_writes;
832 static int dix_reads;
833 static int dif_errors;
835 /* ZBC global data */
836 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
837 static int sdeb_zbc_zone_cap_mb;
838 static int sdeb_zbc_zone_size_mb;
839 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
840 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
842 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
843 static int poll_queues; /* iouring iopoll interface.*/
844 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
846 static DEFINE_RWLOCK(atomic_rw);
847 static DEFINE_RWLOCK(atomic_rw2);
849 static rwlock_t *ramdisk_lck_a[2];
851 static char sdebug_proc_name[] = MY_NAME;
852 static const char *my_name = MY_NAME;
854 static struct bus_type pseudo_lld_bus;
856 static struct device_driver sdebug_driverfs_driver = {
857 .name = sdebug_proc_name,
858 .bus = &pseudo_lld_bus,
861 static const int check_condition_result =
862 SAM_STAT_CHECK_CONDITION;
864 static const int illegal_condition_result =
865 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
867 static const int device_qfull_result =
868 (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
870 static const int condition_met_result = SAM_STAT_CONDITION_MET;
873 /* Only do the extra work involved in logical block provisioning if one or
874 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
875 * real reads and writes (i.e. not skipping them for speed).
877 static inline bool scsi_debug_lbp(void)
879 return 0 == sdebug_fake_rw &&
880 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
/*
 * Map an LBA onto the RAM backing store, wrapping modulo
 * sdebug_store_sectors (do_div leaves the remainder in lba here).
 * If sip or its store pointer is absent, falls back to the first entry
 * of the per-store xarray.
 * NOTE(review): lines carry stray leading numbers and the opening brace
 * plus at least one statement (likely a WARN) look elided — confirm
 * against upstream scsi_debug.c before relying on exact behavior.
 */
883 static void *lba2fake_store(struct sdeb_store_info *sip,
884 unsigned long long lba)
886 struct sdeb_store_info *lsip = sip;
888 lba = do_div(lba, sdebug_store_sectors)
889 if (!sip || !sip->storep) {
891 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
893 return lsip->storep + lba * sdebug_sector_size;
896 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
899 sector = sector_div(sector, sdebug_store_sectors);
901 return sip->dif_storep + sector;
904 static void sdebug_max_tgts_luns(void)
906 struct sdebug_host_info *sdbg_host;
907 struct Scsi_Host *hpnt;
909 spin_lock(&sdebug_host_list_lock);
910 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
911 hpnt = sdbg_host->shost;
912 if ((hpnt->this_id >= 0) &&
913 (sdebug_num_tgts > hpnt->this_id))
914 hpnt->max_id = sdebug_num_tgts + 1;
916 hpnt->max_id = sdebug_num_tgts;
917 /* sdebug_max_luns; */
918 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
920 spin_unlock(&sdebug_host_list_lock);
/* Whether the invalid field was in the data-out buffer or in the CDB */
923 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
925 /* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build an ILLEGAL REQUEST sense (INVALID FIELD IN CDB or IN PARAMETER
 * LIST) in scp's sense buffer, including a SENSE KEY SPECIFIC descriptor
 * pointing at the offending byte/bit.
 * NOTE(review): lines carry stray leading numbers; declarations of the
 * sks[] array, the dsense/fixed-format branch structure and several
 * assignments appear elided by extraction — confirm upstream before
 * treating this as complete.
 */
926 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
927 enum sdeb_cmd_data c_d,
928 int in_byte, int in_bit)
930 unsigned char *sbuff;
934 sbuff = scp->sense_buffer;
936 sdev_printk(KERN_ERR, scp->device,
937 "%s: sense_buffer is NULL\n", __func__);
940 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
941 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
942 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
/* Sense key specific: SKSV plus optional C/D, BPV and bit pointer */
943 memset(sks, 0, sizeof(sks));
949 sks[0] |= 0x7 & in_bit;
951 put_unaligned_be16(in_byte, sks + 1);
/* descriptor format: append to sense; fixed format: bytes 15..17 */
957 memcpy(sbuff + sl + 4, sks, 3);
959 memcpy(sbuff + 15, sks, 3);
961 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
962 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
963 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
966 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
968 if (!scp->sense_buffer) {
969 sdev_printk(KERN_ERR, scp->device,
970 "%s: sense_buffer is NULL\n", __func__);
973 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
975 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
978 sdev_printk(KERN_INFO, scp->device,
979 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
980 my_name, key, asc, asq);
983 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
985 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
/*
 * Host-template ioctl hook: only logs recognized ioctl numbers when
 * verbose is set, then (per the retained comment) reports "unsupported"
 * in a way that keeps fdisk happy.
 * NOTE(review): lines carry stray leading numbers; the arg parameter,
 * the 0x1261 test, closing braces and the final return appear elided —
 * confirm upstream.
 */
988 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
991 if (sdebug_verbose) {
993 sdev_printk(KERN_INFO, dev,
994 "%s: BLKFLSBUF [0x1261]\n", __func__);
995 else if (0x5331 == cmd)
996 sdev_printk(KERN_INFO, dev,
997 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1000 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1004 /* return -ENOTTY; // correct return but upsets fdisk */
1007 static void config_cdb_len(struct scsi_device *sdev)
1009 switch (sdebug_cdb_len) {
1010 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1011 sdev->use_10_for_rw = false;
1012 sdev->use_16_for_rw = false;
1013 sdev->use_10_for_ms = false;
1015 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1016 sdev->use_10_for_rw = true;
1017 sdev->use_16_for_rw = false;
1018 sdev->use_10_for_ms = false;
1020 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1021 sdev->use_10_for_rw = true;
1022 sdev->use_16_for_rw = false;
1023 sdev->use_10_for_ms = true;
1026 sdev->use_10_for_rw = false;
1027 sdev->use_16_for_rw = true;
1028 sdev->use_10_for_ms = true;
1030 case 32: /* No knobs to suggest this so same as 16 for now */
1031 sdev->use_10_for_rw = false;
1032 sdev->use_16_for_rw = true;
1033 sdev->use_10_for_ms = true;
1036 pr_warn("unexpected cdb_len=%d, force to 10\n",
1038 sdev->use_10_for_rw = true;
1039 sdev->use_16_for_rw = false;
1040 sdev->use_10_for_ms = false;
1041 sdebug_cdb_len = 10;
1046 static void all_config_cdb_len(void)
1048 struct sdebug_host_info *sdbg_host;
1049 struct Scsi_Host *shost;
1050 struct scsi_device *sdev;
1052 spin_lock(&sdebug_host_list_lock);
1053 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1054 shost = sdbg_host->shost;
1055 shost_for_each_device(sdev, shost) {
1056 config_cdb_len(sdev);
1059 spin_unlock(&sdebug_host_list_lock);
1062 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1064 struct sdebug_host_info *sdhp;
1065 struct sdebug_dev_info *dp;
1067 spin_lock(&sdebug_host_list_lock);
1068 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1069 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070 if ((devip->sdbg_host == dp->sdbg_host) &&
1071 (devip->target == dp->target))
1072 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1075 spin_unlock(&sdebug_host_list_lock);
/*
 * If a unit attention condition is pending for this device, build the
 * corresponding sense data in scp and return check_condition_result;
 * the consumed UA bit is cleared. Returns 0 when no UA is pending.
 * NOTE(review): lines carry stray leading numbers; the switch header,
 * several case labels, break statements, some ASCQ constants and the
 * trailing "return 0" appear elided by extraction — confirm upstream.
 */
1078 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1082 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1083 if (k != SDEBUG_NUM_UAS) {
1084 const char *cp = NULL;
1088 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1089 POWER_ON_RESET_ASCQ);
1091 cp = "power on reset";
1093 case SDEBUG_UA_POOCCUR:
1094 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1095 POWER_ON_OCCURRED_ASCQ);
1097 cp = "power on occurred";
1099 case SDEBUG_UA_BUS_RESET:
1100 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1105 case SDEBUG_UA_MODE_CHANGED:
1106 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1109 cp = "mode parameters changed";
1111 case SDEBUG_UA_CAPACITY_CHANGED:
1112 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1113 CAPACITY_CHANGED_ASCQ);
1115 cp = "capacity data changed";
1117 case SDEBUG_UA_MICROCODE_CHANGED:
1118 mk_sense_buffer(scp, UNIT_ATTENTION,
1120 MICROCODE_CHANGED_ASCQ);
1122 cp = "microcode has been changed";
1124 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1125 mk_sense_buffer(scp, UNIT_ATTENTION,
1127 MICROCODE_CHANGED_WO_RESET_ASCQ);
1129 cp = "microcode has been changed without reset";
1131 case SDEBUG_UA_LUNS_CHANGED:
1133 * SPC-3 behavior is to report a UNIT ATTENTION with
1134 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1135 * on the target, until a REPORT LUNS command is
1136 * received. SPC-4 behavior is to report it only once.
1137 * NOTE: sdebug_scsi_level does not use the same
1138 * values as struct scsi_device->scsi_level.
1140 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1141 clear_luns_changed_on_target(devip);
1142 mk_sense_buffer(scp, UNIT_ATTENTION,
1146 cp = "reported luns data has changed";
1149 pr_warn("unexpected unit attention code=%d\n", k);
/* consume the UA so it is reported exactly once */
1154 clear_bit(k, devip->uas_bm);
1156 sdev_printk(KERN_INFO, scp->device,
1157 "%s reports: Unit attention: %s\n",
1159 return check_condition_result;
1164 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1165 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1169 struct scsi_data_buffer *sdb = &scp->sdb;
1173 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1174 return DID_ERROR << 16;
1176 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1178 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1183 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1184 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1185 * calls, not required to write in ascending offset order. Assumes resid
1186 * set to scsi_bufflen() prior to any calls.
1188 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1189 int arr_len, unsigned int off_dst)
1191 unsigned int act_len, n;
1192 struct scsi_data_buffer *sdb = &scp->sdb;
1193 off_t skip = off_dst;
1195 if (sdb->length <= off_dst)
1197 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1198 return DID_ERROR << 16;
1200 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1201 arr, arr_len, skip);
1202 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1203 __func__, off_dst, scsi_bufflen(scp), act_len,
1204 scsi_get_resid(scp));
1205 n = scsi_bufflen(scp) - (off_dst + act_len);
1206 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1210 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1211 * 'arr' or -1 if error.
1213 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1216 if (!scsi_bufflen(scp))
1218 if (scp->sc_data_direction != DMA_TO_DEVICE)
1221 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/*
 * INQUIRY identity strings (space padded per SPC) and locally assigned
 * NAA-3 components used to fabricate SAS addresses.
 * NOTE(review): the vendor/product strings look like they lost their
 * trailing space padding in extraction — upstream pads to 8/16 chars;
 * confirm before reuse.
 */
1225 static char sdebug_inq_vendor_id[9] = "Linux ";
1226 static char sdebug_inq_product_id[17] = "scsi_debug ";
1227 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1228 /* Use some locally assigned NAAs for SAS addresses. */
1229 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1230 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1231 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1233 /* Device identification VPD page. Returns number of bytes placed in arr */
/*
 * Emits, in order: T10 vendor id designator, LU identifier (UUID or
 * NAA-3 depending on sdebug_uuid_ctl), relative target port, target
 * port NAA-3, target port group, target device NAA-3, and a SCSI name
 * string for the target device.
 * NOTE(review): lines carry stray leading numbers; designator length
 * bytes, "num +=" advances, braces and else-branches appear elided by
 * extraction — confirm upstream before relying on exact layout.
 */
1234 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1235 int target_dev_id, int dev_id_num,
1236 const char *dev_id_str, int dev_id_str_len,
1237 const uuid_t *lu_name)
1242 port_a = target_dev_id + 1;
1243 /* T10 vendor identifier field format (faked) */
1244 arr[0] = 0x2; /* ASCII */
1247 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1248 memcpy(&arr[12], sdebug_inq_product_id, 16);
1249 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1250 num = 8 + 16 + dev_id_str_len;
1253 if (dev_id_num >= 0) {
1254 if (sdebug_uuid_ctl) {
1255 /* Locally assigned UUID */
1256 arr[num++] = 0x1; /* binary (not necessarily sas) */
1257 arr[num++] = 0xa; /* PIV=0, lu, naa */
1260 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1262 memcpy(arr + num, lu_name, 16);
1265 /* NAA-3, Logical unit identifier (binary) */
1266 arr[num++] = 0x1; /* binary (not necessarily sas) */
1267 arr[num++] = 0x3; /* PIV=0, lu, naa */
1270 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1273 /* Target relative port number */
1274 arr[num++] = 0x61; /* proto=sas, binary */
1275 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1276 arr[num++] = 0x0; /* reserved */
1277 arr[num++] = 0x4; /* length */
1278 arr[num++] = 0x0; /* reserved */
1279 arr[num++] = 0x0; /* reserved */
1281 arr[num++] = 0x1; /* relative port A */
1283 /* NAA-3, Target port identifier */
1284 arr[num++] = 0x61; /* proto=sas, binary */
1285 arr[num++] = 0x93; /* piv=1, target port, naa */
1288 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1290 /* NAA-3, Target port group identifier */
1291 arr[num++] = 0x61; /* proto=sas, binary */
1292 arr[num++] = 0x95; /* piv=1, target port group id */
1297 put_unaligned_be16(port_group_id, arr + num);
1299 /* NAA-3, Target device identifier */
1300 arr[num++] = 0x61; /* proto=sas, binary */
1301 arr[num++] = 0xa3; /* piv=1, target device, naa */
1304 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1306 /* SCSI name string: Target device identifier */
1307 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1308 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1311 memcpy(arr + num, "naa.32222220", 12);
1313 snprintf(b, sizeof(b), "%08X", target_dev_id);
1314 memcpy(arr + num, b, 8);
1316 memset(arr + num, 0, 4);
/*
 * Software interface identification VPD page (0x84) payload, starting
 * from the 4th byte of the response: three 6-byte identifiers.
 */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
		    0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
		    0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
/*
 * Append one network address descriptor: 4 byte header (association/
 * service type, two reserved bytes, payload length) followed by the URL,
 * NUL terminated and zero padded up to a multiple of 4 bytes.
 * Returns the total number of bytes written at arr.
 */
static int sdeb_put_nad(unsigned char *arr, int assoc_type, const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;	/* include the NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4-byte multiple */
	arr[0] = assoc_type;
	arr[1] = 0x0;		/* reserved */
	arr[2] = 0x0;		/* reserved */
	arr[3] = plen;		/* length, null terminated, padded */
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage config */
	num += sdeb_put_nad(arr + num, 0x1, "https://www.kernel.org/config");
	/* lu, logging */
	num += sdeb_put_nad(arr + num, 0x4, "http://www.kernel.org/log");
	return num;
}
1369 /* SCSI ports VPD page */
1370 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1375 port_a = target_dev_id + 1;
1376 port_b = port_a + 1;
1377 arr[num++] = 0x0; /* reserved */
1378 arr[num++] = 0x0; /* reserved */
1380 arr[num++] = 0x1; /* relative port 1 (primary) */
1381 memset(arr + num, 0, 6);
1384 arr[num++] = 12; /* length tp descriptor */
1385 /* naa-5 target port identifier (A) */
1386 arr[num++] = 0x61; /* proto=sas, binary */
1387 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1388 arr[num++] = 0x0; /* reserved */
1389 arr[num++] = 0x8; /* length */
1390 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1392 arr[num++] = 0x0; /* reserved */
1393 arr[num++] = 0x0; /* reserved */
1395 arr[num++] = 0x2; /* relative port 2 (secondary) */
1396 memset(arr + num, 0, 6);
1399 arr[num++] = 12; /* length tp descriptor */
1400 /* naa-5 target port identifier (B) */
1401 arr[num++] = 0x61; /* proto=sas, binary */
1402 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1403 arr[num++] = 0x0; /* reserved */
1404 arr[num++] = 0x8; /* length */
1405 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
/*
 * Canned ATA Information VPD page (0x89) payload, starting at the 4th
 * byte: SAT vendor/product header followed by a 512-byte ATA IDENTIFY
 * DEVICE image.
 * NOTE(review): lines carry stray leading numbers and several data rows
 * (e.g. original lines 1416, 1418, 1423, 1425, 1427) appear elided by
 * extraction — the array as shown is incomplete; restore from upstream.
 */
1412 static unsigned char vpd89_data[] = {
1413 /* from 4th byte */ 0,0,0,0,
1414 'l','i','n','u','x',' ',' ',' ',
1415 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1417 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1419 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1420 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1421 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1422 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1426 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1428 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1429 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1430 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1432 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1433 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1434 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1439 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1440 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1441 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1456 /* ATA Information VPD page */
1457 static int inquiry_vpd_89(unsigned char *arr)
1459 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1460 return sizeof(vpd89_data);
/*
 * Block limits VPD page (0xb0) template, from the 4th response byte.
 * First row seeds granularity/length defaults; the rest is zero filled
 * and overwritten by inquiry_vpd_b0().
 * NOTE(review): lines carry stray leading numbers and the closing "};"
 * appears elided by extraction.
 */
1464 static unsigned char vpdb0_data[] = {
1465 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1466 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1471 /* Block limits VPD page (SBC-3) */
1472 static int inquiry_vpd_b0(unsigned char *arr)
1476 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1478 /* Optimal transfer length granularity */
1479 if (sdebug_opt_xferlen_exp != 0 &&
1480 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1481 gran = 1 << sdebug_opt_xferlen_exp;
1483 gran = 1 << sdebug_physblk_exp;
1484 put_unaligned_be16(gran, arr + 2);
1486 /* Maximum Transfer Length */
1487 if (sdebug_store_sectors > 0x400)
1488 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1490 /* Optimal Transfer Length */
1491 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1494 /* Maximum Unmap LBA Count */
1495 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1497 /* Maximum Unmap Block Descriptor Count */
1498 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1501 /* Unmap Granularity Alignment */
1502 if (sdebug_unmap_alignment) {
1503 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1504 arr[28] |= 0x80; /* UGAVALID */
1507 /* Optimal Unmap Granularity */
1508 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1510 /* Maximum WRITE SAME Length */
1511 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1513 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1515 return sizeof(vpdb0_data);
1518 /* Block device characteristics VPD page (SBC-3) */
1519 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1521 memset(arr, 0, 0x3c);
1523 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1525 arr[3] = 5; /* less than 1.8" */
1526 if (devip->zmodel == BLK_ZONED_HA)
1527 arr[4] = 1 << 4; /* zoned field = 01b */
1532 /* Logical block provisioning VPD page (SBC-4) */
1533 static int inquiry_vpd_b2(unsigned char *arr)
1535 memset(arr, 0, 0x4);
1536 arr[0] = 0; /* threshold exponent */
1543 if (sdebug_lbprz && scsi_debug_lbp())
1544 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1545 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1546 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1547 /* threshold_percentage=0 */
1551 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1552 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1554 memset(arr, 0, 0x3c);
1555 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1557 * Set Optimal number of open sequential write preferred zones and
1558 * Optimal number of non-sequentially written sequential write
1559 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1560 * fields set to zero, apart from Max. number of open swrz_s field.
1562 put_unaligned_be32(0xffffffff, &arr[4]);
1563 put_unaligned_be32(0xffffffff, &arr[8]);
1564 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1565 put_unaligned_be32(devip->max_open, &arr[12]);
1567 put_unaligned_be32(0xffffffff, &arr[12]);
1568 if (devip->zcap < devip->zsize) {
1569 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1570 put_unaligned_be64(devip->zsize, &arr[20]);
1577 #define SDEBUG_LONG_INQ_SZ 96
1578 #define SDEBUG_MAX_INQ_ARR_SZ 584
/*
 * INQUIRY command emulation: handles standard INQUIRY plus the EVPD
 * pages advertised for disk/ZBC devices; rejects the obsolete CMDDT bit.
 * Returns 0, check_condition_result, or DID_REQUEUE<<16 on allocation
 * failure.
 * NOTE(review): lines carry stray leading numbers; declarations
 * (arr, n, len, ret, alloc_len, lu_id_str), several braces, kfree()
 * calls and else-branches appear elided by extraction — confirm
 * upstream before treating this as complete.
 */
1580 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1582 unsigned char pq_pdt;
1584 unsigned char *cmd = scp->cmnd;
1587 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1589 alloc_len = get_unaligned_be16(cmd + 3);
1590 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1592 return DID_REQUEUE << 16;
1593 is_disk = (sdebug_ptype == TYPE_DISK);
1594 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1595 is_disk_zbc = (is_disk || is_zbc);
1596 have_wlun = scsi_is_wlun(scp->device->lun);
1598 pq_pdt = TYPE_WLUN; /* present, wlun */
1599 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1600 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1602 pq_pdt = (sdebug_ptype & 0x1f);
1604 if (0x2 & cmd[1]) { /* CMDDT bit set */
1605 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1607 return check_condition_result;
1608 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1609 int lu_id_num, port_group_id, target_dev_id;
1612 int host_no = devip->sdbg_host->shost->host_no;
/* fabricate ids from host/channel/target/lun numbers */
1614 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1615 (devip->channel & 0x7f);
1616 if (sdebug_vpd_use_hostno == 0)
1618 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1619 (devip->target * 1000) + devip->lun);
1620 target_dev_id = ((host_no + 1) * 2000) +
1621 (devip->target * 1000) - 3;
1622 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1623 if (0 == cmd[2]) { /* supported vital product data pages */
1624 arr[1] = cmd[2]; /*sanity */
1626 arr[n++] = 0x0; /* this page */
1627 arr[n++] = 0x80; /* unit serial number */
1628 arr[n++] = 0x83; /* device identification */
1629 arr[n++] = 0x84; /* software interface ident. */
1630 arr[n++] = 0x85; /* management network addresses */
1631 arr[n++] = 0x86; /* extended inquiry */
1632 arr[n++] = 0x87; /* mode page policy */
1633 arr[n++] = 0x88; /* SCSI ports */
1634 if (is_disk_zbc) { /* SBC or ZBC */
1635 arr[n++] = 0x89; /* ATA information */
1636 arr[n++] = 0xb0; /* Block limits */
1637 arr[n++] = 0xb1; /* Block characteristics */
1639 arr[n++] = 0xb2; /* LB Provisioning */
1641 arr[n++] = 0xb6; /* ZB dev. char. */
1643 arr[3] = n - 4; /* number of supported VPD pages */
1644 } else if (0x80 == cmd[2]) { /* unit serial number */
1645 arr[1] = cmd[2]; /*sanity */
1647 memcpy(&arr[4], lu_id_str, len);
1648 } else if (0x83 == cmd[2]) { /* device identification */
1649 arr[1] = cmd[2]; /*sanity */
1650 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1651 target_dev_id, lu_id_num,
1654 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1655 arr[1] = cmd[2]; /*sanity */
1656 arr[3] = inquiry_vpd_84(&arr[4]);
1657 } else if (0x85 == cmd[2]) { /* Management network addresses */
1658 arr[1] = cmd[2]; /*sanity */
1659 arr[3] = inquiry_vpd_85(&arr[4]);
1660 } else if (0x86 == cmd[2]) { /* extended inquiry */
1661 arr[1] = cmd[2]; /*sanity */
1662 arr[3] = 0x3c; /* number of following entries */
1663 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1664 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1665 else if (have_dif_prot)
1666 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1668 arr[4] = 0x0; /* no protection stuff */
1669 arr[5] = 0x7; /* head of q, ordered + simple q's */
1670 } else if (0x87 == cmd[2]) { /* mode page policy */
1671 arr[1] = cmd[2]; /*sanity */
1672 arr[3] = 0x8; /* number of following entries */
1673 arr[4] = 0x2; /* disconnect-reconnect mp */
1674 arr[6] = 0x80; /* mlus, shared */
1675 arr[8] = 0x18; /* protocol specific lu */
1676 arr[10] = 0x82; /* mlus, per initiator port */
1677 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1678 arr[1] = cmd[2]; /*sanity */
1679 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1680 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1681 arr[1] = cmd[2]; /*sanity */
1682 n = inquiry_vpd_89(&arr[4]);
1683 put_unaligned_be16(n, arr + 2);
1684 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1685 arr[1] = cmd[2]; /*sanity */
1686 arr[3] = inquiry_vpd_b0(&arr[4]);
1687 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1688 arr[1] = cmd[2]; /*sanity */
1689 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1690 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1691 arr[1] = cmd[2]; /*sanity */
1692 arr[3] = inquiry_vpd_b2(&arr[4]);
1693 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1694 arr[1] = cmd[2]; /*sanity */
1695 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1697 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1699 return check_condition_result;
1701 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1702 ret = fill_from_dev_buffer(scp, arr,
1703 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1707 /* drops through here for a standard inquiry */
1708 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1709 arr[2] = sdebug_scsi_level;
1710 arr[3] = 2; /* response_data_format==2 */
1711 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1712 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1713 if (sdebug_vpd_use_hostno == 0)
1714 arr[5] |= 0x10; /* claim: implicit TPGS */
1715 arr[6] = 0x10; /* claim: MultiP */
1716 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1717 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1718 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1719 memcpy(&arr[16], sdebug_inq_product_id, 16);
1720 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1721 /* Use Vendor Specific area to place driver date in ASCII hex */
1722 memcpy(&arr[36], sdebug_version_date, 8);
1723 /* version descriptors (2 bytes each) follow */
1724 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1725 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1727 if (is_disk) { /* SBC-4 no version claimed */
1728 put_unaligned_be16(0x600, arr + n);
1730 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1731 put_unaligned_be16(0x525, arr + n);
1733 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1734 put_unaligned_be16(0x624, arr + n);
1737 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1738 ret = fill_from_dev_buffer(scp, arr,
1739 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1744 /* See resp_iec_m_pg() for how this data is manipulated */
/* Informational exceptions control mode page (0x1c) template.
 * NOTE(review): initializer's continuation and closing "};" appear
 * elided by extraction — restore from upstream. */
1745 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
/*
 * REQUEST SENSE emulation. Priority order: pollable not-ready data when
 * the device is (being) stopped, then a fabricated THRESHOLD EXCEEDED
 * report when the IEC mode page has TEST=1/MRIE=6, else an empty
 * NO SENSE response. Honors the DESC bit (dsense) in both formats.
 * NOTE(review): lines carry stray leading numbers; the "if (dsense)"
 * branch headers, len assignments and arr[0] response-code stores
 * appear elided by extraction — confirm upstream.
 */
1748 static int resp_requests(struct scsi_cmnd *scp,
1749 struct sdebug_dev_info *devip)
1751 unsigned char *cmd = scp->cmnd;
1752 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1753 bool dsense = !!(cmd[1] & 1);
1754 u32 alloc_len = cmd[4];
1756 int stopped_state = atomic_read(&devip->stopped);
1758 memset(arr, 0, sizeof(arr));
1759 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1763 arr[2] = LOGICAL_UNIT_NOT_READY;
1764 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1768 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
1769 arr[7] = 0xa; /* 18 byte sense buffer */
1770 arr[12] = LOGICAL_UNIT_NOT_READY;
1771 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1773 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1774 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1777 arr[1] = 0x0; /* NO_SENSE in sense_key */
1778 arr[2] = THRESHOLD_EXCEEDED;
1779 arr[3] = 0xff; /* Failure prediction(false) */
1783 arr[2] = 0x0; /* NO_SENSE in sense_key */
1784 arr[7] = 0xa; /* 18 byte sense buffer */
1785 arr[12] = THRESHOLD_EXCEEDED;
1786 arr[13] = 0xff; /* Failure prediction(false) */
1788 } else { /* nothing to report */
1791 memset(arr, 0, len);
1794 memset(arr, 0, len);
1799 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
/*
 * START STOP UNIT emulation. Rejects non-zero power conditions; while
 * the tur_ms_to_ready delay (stopped_state == 2) is still running, a
 * STOP is dummied up as success but a START is refused so the delay
 * cannot be overridden. Returns SDEG_RES_IMMED_MASK when the state did
 * not change or the IMMED bit is set.
 * NOTE(review): lines carry stray leading numbers; the power_cond
 * check, "bool changing" declaration, closing braces and final return
 * appear elided by extraction — confirm upstream.
 */
1802 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1804 unsigned char *cmd = scp->cmnd;
1805 int power_cond, want_stop, stopped_state;
1808 power_cond = (cmd[4] & 0xf0) >> 4;
1810 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1811 return check_condition_result;
1813 want_stop = !(cmd[4] & 1);
1814 stopped_state = atomic_read(&devip->stopped);
1815 if (stopped_state == 2) {
1816 ktime_t now_ts = ktime_get_boottime();
1818 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1819 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1821 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1822 /* tur_ms_to_ready timer extinguished */
1823 atomic_set(&devip->stopped, 0);
1827 if (stopped_state == 2) {
1829 stopped_state = 1; /* dummy up success */
1830 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1831 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1832 return check_condition_result;
1836 changing = (stopped_state != want_stop);
1838 atomic_xchg(&devip->stopped, want_stop);
1839 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1840 return SDEG_RES_IMMED_MASK;
1845 static sector_t get_sdebug_capacity(void)
1847 static const unsigned int gibibyte = 1073741824;
1849 if (sdebug_virtual_gb > 0)
1850 return (sector_t)sdebug_virtual_gb *
1851 (gibibyte / sdebug_sector_size);
1853 return sdebug_store_sectors;
1856 #define SDEBUG_READCAP_ARR_SZ 8
1857 static int resp_readcap(struct scsi_cmnd *scp,
1858 struct sdebug_dev_info *devip)
1860 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1863 /* following just in case virtual_gb changed */
1864 sdebug_capacity = get_sdebug_capacity();
1865 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1866 if (sdebug_capacity < 0xffffffff) {
1867 capac = (unsigned int)sdebug_capacity - 1;
1868 put_unaligned_be32(capac, arr + 0);
1870 put_unaligned_be32(0xffffffff, arr + 0);
1871 put_unaligned_be16(sdebug_sector_size, arr + 6);
1872 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1875 #define SDEBUG_READCAP16_ARR_SZ 32
/*
 * READ CAPACITY (16) emulation: 64-bit last LBA, block size, physical
 * block exponent, lowest aligned LBA, LBPME/LBPRZ provisioning bits and
 * protection (P_TYPE/PROT_EN) fields.
 * NOTE(review): lines carry stray leading numbers; the alloc_len
 * declaration, the LBPRZ bit store, the RC BASIS store and closing
 * braces appear elided by extraction — confirm upstream.
 */
1876 static int resp_readcap16(struct scsi_cmnd *scp,
1877 struct sdebug_dev_info *devip)
1879 unsigned char *cmd = scp->cmnd;
1880 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1883 alloc_len = get_unaligned_be32(cmd + 10);
1884 /* following just in case virtual_gb changed */
1885 sdebug_capacity = get_sdebug_capacity();
1886 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1887 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1888 put_unaligned_be32(sdebug_sector_size, arr + 8);
1889 arr[13] = sdebug_physblk_exp & 0xf;
1890 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1892 if (scsi_debug_lbp()) {
1893 arr[14] |= 0x80; /* LBPME */
1894 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1895 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1896 * in the wider field maps to 0 in this field.
1898 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1903 * Since the scsi_debug READ CAPACITY implementation always reports the
1904 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1906 if (devip->zmodel == BLK_ZONED_HM)
1909 arr[15] = sdebug_lowest_aligned & 0xff;
1911 if (have_dif_prot) {
1912 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1913 arr[12] |= 1; /* PROT_EN */
1916 return fill_from_dev_buffer(scp, arr,
1917 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1920 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/*
 * Respond to REPORT TARGET PORT GROUPS (MAINTENANCE IN). Fabricates two
 * single-port port groups: group A usable, group B unavailable, matching
 * the two-port claim made in VPD page 0x88.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
	unsigned char *cmd = scp->cmnd;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
		return DID_REQUEUE << 16;	/* out of memory: ask mid-layer to retry */
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
	    (devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
	    (devip->channel & 0x7f) + 0x80;
	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	put_unaligned_be16(port_group_a, arr + n);
	arr[n++] = 0; /* Reserved */
	arr[n++] = 0; /* Status code */
	arr[n++] = 0; /* Vendor unique */
	arr[n++] = 0x1; /* One port per group */
	arr[n++] = 0; /* Reserved */
	arr[n++] = 0; /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	arr[n++] = 3; /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	arr[n++] = 0; /* Reserved */
	arr[n++] = 0; /* Status code */
	arr[n++] = 0; /* Vendor unique */
	arr[n++] = 0x1; /* One port per group */
	arr[n++] = 0; /* Reserved */
	arr[n++] = 0; /* Reserved */
	put_unaligned_be16(port_b, arr + n);

	put_unaligned_be32(rlen, arr + 0);	/* RETURN DATA LENGTH header field */
	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/*
 * Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN).
 * reporting_opts 0 lists every supported opcode (with its attached
 * service-action variants from opcode_info_arr); 1..3 describe a single
 * command (opcode only, opcode+sa, or either). rctd adds a 12-byte
 * command timeouts descriptor per entry.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts descriptor */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	if (alloc_len > 8192)
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
		return check_condition_result;
	switch (reporting_opts) {
	case 0: /* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
			count += (oip->num_attached + 1);
		bump = rctd ? 20 : 8;	/* per-entry size with/without timeouts descriptor */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
				put_unaligned_be16(0xa, arr + offset + 8);
			/* emit the service-action variants attached to this opcode */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
				put_unaligned_be16(0xa,
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
		if (1 == reporting_opts) {
			if (FF_SA & oip->flags) {
				/* opcode needs a service action: mode 1 invalid */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB,
				return check_condition_result;
		} else if (2 == reporting_opts &&
			   0 == (FF_SA & oip->flags)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
			kfree(arr);	/* point at requested sa */
			return check_condition_result;
		if (0 == (FF_SA & oip->flags) &&
		    req_opcode == oip->opcode)
		else if (0 == (FF_SA & oip->flags)) {
			na = oip->num_attached;
			for (k = 0, oip = oip->arrp; k < na;
				if (req_opcode == oip->opcode)
			supp = (k >= na) ? 1 : 3;	/* 1: not supported; 3: supported */
		} else if (req_sa != oip->sa) {
			na = oip->num_attached;
			for (k = 0, oip = oip->arrp; k < na;
				if (req_sa == oip->sa)
			supp = (k >= na) ? 1 : 3;
			u = oip->len_mask[0];	/* cdb length */
			put_unaligned_be16(u, arr + 2);
			arr[4] = oip->opcode;
			for (k = 1; k < u; ++k)
				arr[4 + k] = (k < 16) ?
						oip->len_mask[k] : 0xff;
		arr[1] = (rctd ? 0x80 : 0) | supp;
			put_unaligned_be16(0xa, arr + offset);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
/*
 * Respond to REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS (MAINTENANCE IN).
 * Claims support for ATS, ATSS, LURS and ITNRS.
 */
static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
	u8 *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	repd = !!(cmd[2] & 0x80);	/* REPD: extended parameter data requested */
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	arr[0] = 0xc8; /* ATS | ATSS | LURS */
	arr[1] = 0x1;  /* ITNRS */

	len = (len < alloc_len) ? len : alloc_len;
	return fill_from_dev_buffer(scp, arr, len);
2177 /* <<Following mode page info copied from ST318451LW>> */
/*
 * Build the Read-Write Error Recovery mode page (0x1) for MODE SENSE.
 * pcontrol == 1 (changeable values) reports an all-zero mask after the
 * two header bytes. target is unused. Returns the page length.
 */
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
		5, 0, 0xff, 0xff
	};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
/*
 * Build the Disconnect-Reconnect mode page (0x2) for MODE SENSE.
 * pcontrol == 1 (changeable values) reports an all-zero mask after the
 * two header bytes. target is unused. Returns the page length.
 */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2201 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2202 { /* Format device page for mode_sense */
2203 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2204 0, 0, 0, 0, 0, 0, 0, 0,
2205 0, 0, 0, 0, 0x40, 0, 0, 0};
2207 memcpy(p, format_pg, sizeof(format_pg));
2208 put_unaligned_be16(sdebug_sectors_per, p + 10);
2209 put_unaligned_be16(sdebug_sector_size, p + 12);
2210 if (sdebug_removable)
2211 p[20] |= 0x20; /* should agree with INQUIRY */
2213 memset(p + 2, 0, sizeof(format_pg) - 2);
2214 return sizeof(format_pg);
/* Current values of the Caching mode page (0x8); resp_mode_select()
 * updates this array in place when the page is changed. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,

/*
 * Caching mode page (0x8) for MODE SENSE. pcontrol selects current (0),
 * changeable (1) or default (2) values. Returns the page length.
 */
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Caching page for mode_sense */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
/* Current values of the Control mode page (0xa); resp_mode_select()
 * updates this array in place when the page is changed. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,

/*
 * Control mode page (0xa) for MODE SENSE. pcontrol selects current (0),
 * changeable (1) or default (2) values. Returns the page length.
 */
static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,

		ctrl_m_pg[2] |= 0x4;	/* D_SENSE bit (see resp_mode_select) */
		ctrl_m_pg[2] &= ~0x4;
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
/*
 * Informational Exceptions Control mode page (0x1c) for MODE SENSE.
 * Copies the module-level iec_m_pg (current values, modifiable through
 * MODE SELECT). pcontrol selects current (0), changeable (1) or
 * default (2) values. Returns the page length.
 */
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
/*
 * Build the SAS SSP mode page (0x19), short format, for MODE SENSE.
 * pcontrol == 1 (changeable values) reports an all-zero mask after the
 * two header bytes. target is unused. Returns the page length.
 */
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
	};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
/*
 * SAS phy control and discover mode subpage (0x19, subpage 0x1) for
 * MODE SENSE. The template describes two phys; SAS addresses (the rows
 * marked "insert SAS addr") and per-phy port identifiers are patched in
 * before the template is copied to the caller's buffer.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,

	/* patch the NAA-3 based SAS addresses into both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
/*
 * Build the SAS SSP shared protocol specific port mode subpage (0x19,
 * subpage 0x2) for MODE SENSE. pcontrol == 1 (changeable values) reports
 * an all-zero mask after the four header bytes. Returns the page length.
 */
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2337 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * Respond to MODE SENSE (6) and MODE SENSE (10). Optionally emits a
 * block descriptor (8 or 16 bytes, per LLBAA), then dispatches on the
 * page code to the resp_*_pg() builders above. Page 0x3f concatenates
 * all pages (plus subpages when subpcode is 0xff).
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target = scp->device->id;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* current/changeable/default/saved */
	pcode = cmd[2] & 0x3f;
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA accepted (10-byte only) */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();
	/* 8-byte block descriptor caps the LBA count at 0xffffffff */
	if (sdebug_capacity > 0xfffffffe)
		put_unaligned_be32(0xffffffff, ap + 0);
		put_unaligned_be32(sdebug_capacity, ap + 0);
	put_unaligned_be16(sdebug_sector_size, ap + 6);
	} else if (16 == bd_len) {	/* long LBA block descriptor */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
	/* subpages are only defined for page 0x19 (and 0x3f/0xff below) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
	case 0x3:       /* Format device page, direct access */
			len = resp_format_pg(ap, pcontrol, target);
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
				len += resp_format_pg(ap + len, pcontrol,
				len += resp_caching_pg(ap + len, pcontrol,
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
							  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	/* fill in the MODE DATA LENGTH header field */
		arr[0] = offset - 1;
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2504 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * Respond to MODE SELECT (6) and MODE SELECT (10). Fetches the parameter
 * list, validates the header and block-descriptor layout, then applies
 * the new values for the Caching (0x8), Control (0xa) or IEC (0x1c)
 * mode page to the module-level current-values arrays, raising a
 * MODE PARAMETERS CHANGED unit attention on success.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	/* PF must be set, SP (save pages) is not supported */
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	res = fetch_to_dev_buffer(scp, arr, param_len);
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);	/* offset of first mode page */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit must be zero in MODE SELECT data */
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	spf = !!(arr[off] & 0x40);	/* subpage format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {	/* page lengths must match */
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
			/* D_SENSE bit selects descriptor-format sense data */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Build the Temperature log page (0xd): parameter 0x0 is the current
 * temperature (38 C), parameter 0x1 the reference temperature (65 C).
 * Returns the number of bytes written.
 */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,
		0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
/*
 * Build the Informational Exceptions log page (0x2f). When the TEST bit
 * is set in the IEC mode page (iec_m_pg), report a THRESHOLD_EXCEEDED
 * additional sense code. Returns the number of bytes written.
 */
static int resp_ie_l_pg(unsigned char *arr)
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
	return sizeof(ie_l_pg);
/*
 * Build the Environmental Reporting log subpage (page 0xd, subpage 0x1):
 * one temperature parameter (0x0) and one relative-humidity parameter
 * (0x1). Returns the number of bytes written.
 */
static int resp_env_rep_l_spg(unsigned char *arr)
{
	static const unsigned char env_rep_l_spg[] = {
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}
2623 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * Respond to LOG SENSE. Supports the supported-pages list (0x0), the
 * Temperature page (0xd, plus its 0x1 Environmental Reporting subpage)
 * and the Informational Exceptions page (0x2f); subpcode 0xff lists
 * supported page/subpage combinations.
 */
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	/* PPC and SP are both unsupported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	if (0 == subpcode) {
		case 0x0:	/* Supported log pages log page */
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
	} else if (0xff == subpcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0xff;	/* all 0x2f subpages */
		case 0xd:	/* Temperature subpages */
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xff;	/* these subpages */
		case 0x2f:	/* Informational exceptions subpages */
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0xff;	/* these subpages */
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
	} else if (subpcode > 0) {
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2725 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2727 return devip->nr_zones != 0;
/*
 * Map an LBA to its zone state structure. For devices where the zone
 * capacity equals the zone size (or for conventional zones) this is a
 * direct index; otherwise gap zones are interleaved and the index must
 * be adjusted.
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* lba beyond this zone's range: it lies in the following zone */
	if (lba >= zsp->z_start + zsp->z_size)
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2752 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2754 return zsp->z_type == ZBC_ZTYPE_CNV;
2757 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2759 return zsp->z_type == ZBC_ZTYPE_GAP;
/* True for a sequential zone: anything that is neither conventional nor gap. */
static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
	return !(zbc_zone_is_conv(zsp) || zbc_zone_is_gap(zsp));
}
/*
 * Close an open sequential zone: decrement the matching open-zone counter
 * and set the condition to EMPTY (write pointer still at the zone start)
 * or CLOSED. No-op for non-sequential or non-open zones.
 */
static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))

	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
		devip->nr_exp_open--;

	if (zsp->z_wp == zsp->z_start) {
		zsp->z_cond = ZC1_EMPTY;
		zsp->z_cond = ZC4_CLOSED;
/*
 * Scan the zone table and close the first zone found in the
 * implicitly-open condition (used to make room under max_open).
 */
static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
	struct sdeb_zone_state *zsp = &devip->zstate[0];

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
			zbc_close_zone(devip, zsp);
/*
 * Open a sequential zone, explicitly (OPEN ZONE command) or implicitly
 * (first write). Enforces the max_open limit by closing an implicitly
 * open zone when necessary, and updates the open-zone counters.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))

	/* already open in the requested mode: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
/*
 * Transition a zone to the FULL condition, releasing its slot in the
 * implicit/explicit open-zone accounting first.
 */
static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
	switch (zsp->z_cond) {
	case ZC2_IMPLICIT_OPEN:
		devip->nr_imp_open--;
	case ZC3_EXPLICIT_OPEN:
		devip->nr_exp_open--;
		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
			  zsp->z_start, zsp->z_cond);

	zsp->z_cond = ZC5_FULL;
/*
 * Advance a zone's write pointer after a write of num blocks at lba and
 * mark the zone FULL when the pointer reaches the zone end. Sequential
 * write required (SWR) zones advance linearly; for other sequential
 * zones a write that is not at the pointer flags a non-seq resource.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

	if (lba != zsp->z_wp)
		zsp->z_non_seq_resource = true;
	} else if (end > zsp->z_wp) {
	if (zsp->z_wp >= zend)
		zbc_set_zone_full(devip, zsp);
	zend = zsp->z_start + zsp->z_size;
/*
 * Validate a read or write against ZBC zone rules. Host-aware devices
 * skip read checks; host-managed reads must not cross a zone-type
 * boundary. Writes must avoid gap zones, stay inside one sequential
 * zone, land on the write pointer for SWR zones, and may implicitly
 * open a closed/empty zone (subject to max_open). Returns 0 or
 * check_condition_result with sense data set.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

		if (devip->zmodel == BLK_ZONED_HA)
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
			return check_condition_result;

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
			return check_condition_result;
		zbc_open_zone(devip, zsp, false);
/*
 * Common validation for media access commands: range within capacity,
 * transfer length within the store size, write-protect enforcement, and
 * (for zoned devices) the ZBC rules in check_zbc_access_params().
 * Returns 0 on success or check_condition_result with sense data set.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);
3001 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3002 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3003 * that access any of the "stores" in struct sdeb_store_info should call this
3004 * function with bug_if_fake_rw set to true.
/*
 * Resolve a device to its backing store. Returns NULL in fake_rw mode
 * (and BUGs if the caller declared it must have a real store; see the
 * comment block above).
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
	if (sdebug_fake_rw) {
		BUG_ON(bug_if_fake_rw);	/* See note above */
	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
/* Returns number of bytes copied or -1 if error. Copies between the
 * command's scatter-gather list and the backing store, wrapping around
 * the end of the store (sdebug_store_sectors) when needed. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;

		dir = DMA_TO_DEVICE;
		write_since_sync = true;
		dir = DMA_FROM_DEVICE;

	if (!sdb->length || !sip)
	if (scp->sc_data_direction != dir)

	block = do_div(lba, sdebug_store_sectors);	/* offset within the store */
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;	/* wrap-around amount */

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
	/* copy the wrapped remainder from/to the start of the store */
	ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		    fsp, rest * sdebug_sector_size,
		    sg_skip + ((num - rest) * sdebug_sector_size),
/* Returns number of bytes copied or -1 if error. Fetches num blocks of
 * data-out from the command's scatter-gather list into doutp. */
static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (scp->sc_data_direction != DMA_TO_DEVICE)
	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
			      num * sdebug_sector_size, 0, true);
/* If sip->storep+lba compares equal to arr(num), then copy top half of
 * arr into sip->storep+lba and return true. If comparison fails then
 * return false (COMPARE AND WRITE helper). Handles wrap-around at the
 * end of the backing store. compare_only skips the write-back. */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);	/* offset within the store */
	if (block + num > store_blks)
		rest = block + num - store_blks;	/* wrap-around amount */

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
		res = memcmp(fsp, arr + ((num - rest) * lb_size),
	/* comparison matched: write the second half of arr to the store */
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
/*
 * Compute the T10 PI guard tag for one block: either an IP checksum or
 * a CRC, matching the configured guard type.
 */
static __be16 dif_compute_csum(const void *buf, int len)
		csum = (__force __be16)ip_compute_csum(buf, len);
		csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one block's T10 protection information tuple: guard tag
 * (checksum over the data), then the reference tag against the sector
 * number (type 1 protection) or the expected initial LBA (type 2).
 * Logs and returns a nonzero code on mismatch.
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
		       (unsigned long)sector,
		       be16_to_cpu(sdt->guard_tag),
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
/*
 * Copy protection data between the command's protection scatter-gather
 * list and the driver's PI store (sip->dif_storep).
 * @read == true copies store -> sgl (for READ); false copies sgl -> store
 * (for WRITE). Like the data store, the PI store is circular: when a range
 * runs past dif_store_end, @rest bytes wrap to the start of dif_storep.
 */
3144 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3145 unsigned int sectors, bool read)
3149 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3150 scp->device->hostdata, true);
3151 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3152 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3153 struct sg_mapping_iter miter;
3155 /* Bytes of protection data to copy into sgl */
3156 resid = sectors * sizeof(*dif_storep);
/* direction of the miter mapping mirrors the @read flag */
3158 sg_miter_start(&miter, scsi_prot_sglist(scp),
3159 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3160 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3162 while (sg_miter_next(&miter) && resid > 0) {
3163 size_t len = min_t(size_t, miter.length, resid);
3164 void *start = dif_store(sip, sector);
/* @rest is the byte count wrapping past the end of the PI store */
3167 if (dif_store_end < start + len)
3168 rest = start + len - dif_store_end;
3173 memcpy(paddr, start, len - rest);
3175 memcpy(start, paddr, len - rest);
/* wrapped tail continues at the start of dif_storep */
3179 memcpy(paddr + len - rest, dif_storep, rest);
3181 memcpy(dif_storep, paddr + len - rest, rest);
3184 sector += len / sizeof(*dif_storep);
3187 sg_miter_stop(&miter);
/*
 * Verify protection information for a READ covering @sectors sectors
 * starting at @start_sec, then copy the PI tuples to the command's
 * protection sgl. An app_tag of 0xffff is treated specially (presumably
 * the PI escape that skips checking -- the branch body is elided here).
 * Returns the first non-zero dif_verify() result, propagated on lines not
 * visible in this excerpt.
 */
3190 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3191 unsigned int sectors, u32 ei_lba)
3196 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3197 scp->device->hostdata, true);
3198 struct t10_pi_tuple *sdt;
3200 for (i = 0; i < sectors; i++, ei_lba++) {
3201 sector = start_sec + i;
3202 sdt = dif_store(sip, sector);
3204 if (sdt->app_tag == cpu_to_be16(0xffff))
3208 * Because scsi_debug acts as both initiator and
3209 * target we proceed to verify the PI even if
3210 * RDPROTECT=3. This is done so the "initiator" knows
3211 * which type of error to return. Otherwise we would
3212 * have to iterate over the PI twice.
3214 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3215 ret = dif_verify(sdt, lba2fake_store(sip, sector),
/* hand the verified PI back to the "initiator" side */
3224 dif_copy_prot(scp, start_sec, sectors, true);
/*
 * Take the store's reader lock. When sdebug_no_rwlock is set, no real
 * lock is taken and only sparse annotations (__acquire) are emitted.
 * NOTE(review): the sip vs. NULL branching (per-store macc_lck vs. global
 * sdeb_fake_rw_lck) is elided in this excerpt -- confirm against full file.
 */
3231 sdeb_read_lock(struct sdeb_store_info *sip)
3233 if (sdebug_no_rwlock) {
3235 __acquire(&sip->macc_lck);
3237 __acquire(&sdeb_fake_rw_lck);
3240 read_lock(&sip->macc_lck);
3242 read_lock(&sdeb_fake_rw_lck);
/*
 * Release the reader lock taken by sdeb_read_lock(); with
 * sdebug_no_rwlock set only the sparse __release annotation is emitted.
 */
3247 sdeb_read_unlock(struct sdeb_store_info *sip)
3249 if (sdebug_no_rwlock) {
3251 __release(&sip->macc_lck);
3253 __release(&sdeb_fake_rw_lck);
3256 read_unlock(&sip->macc_lck);
3258 read_unlock(&sdeb_fake_rw_lck);
/*
 * Take the store's writer lock (or, with sdebug_no_rwlock, only emit the
 * sparse __acquire annotation). Counterpart of sdeb_read_lock().
 */
3263 sdeb_write_lock(struct sdeb_store_info *sip)
3265 if (sdebug_no_rwlock) {
3267 __acquire(&sip->macc_lck);
3269 __acquire(&sdeb_fake_rw_lck);
3272 write_lock(&sip->macc_lck);
3274 write_lock(&sdeb_fake_rw_lck);
/*
 * Release the writer lock taken by sdeb_write_lock(); with
 * sdebug_no_rwlock set only the sparse __release annotation is emitted.
 */
3279 sdeb_write_unlock(struct sdeb_store_info *sip)
3281 if (sdebug_no_rwlock) {
3283 __release(&sip->macc_lck);
3285 __release(&sdeb_fake_rw_lck);
3288 write_unlock(&sip->macc_lck);
3290 write_unlock(&sdeb_fake_rw_lck);
/*
 * Service a READ-class command: decode LBA and transfer length from the
 * cdb (layout varies by opcode; the switch header is elided here), apply
 * protection and error-injection checks, then copy data from the fake
 * store to the command buffer. Returns 0 on success, or
 * check_condition_result / illegal_condition_result / DID_ERROR << 16.
 */
3294 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3301 struct sdeb_store_info *sip = devip2sip(devip, true);
3302 u8 *cmd = scp->cmnd;
/* 8-byte LBA at offset 2, 4-byte length at 10 (READ(16)-style cdb) */
3307 lba = get_unaligned_be64(cmd + 2);
3308 num = get_unaligned_be32(cmd + 10);
/* 4-byte LBA at offset 2, 2-byte length at 7 (READ(10)-style cdb) */
3313 lba = get_unaligned_be32(cmd + 2);
3314 num = get_unaligned_be16(cmd + 7);
/* 21-bit LBA packed into bytes 1..3; length 0 means 256 (READ(6)) */
3319 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3320 (u32)(cmd[1] & 0x1f) << 16;
3321 num = (0 == cmd[4]) ? 256 : cmd[4];
3326 lba = get_unaligned_be32(cmd + 2);
3327 num = get_unaligned_be32(cmd + 6);
3330 case XDWRITEREAD_10:
3332 lba = get_unaligned_be32(cmd + 2);
3333 num = get_unaligned_be16(cmd + 7);
3336 default: /* assume READ(32) */
3337 lba = get_unaligned_be64(cmd + 12);
3338 ei_lba = get_unaligned_be32(cmd + 20);
3339 num = get_unaligned_be32(cmd + 28);
/* DIF sanity: type 2 requires the 32-byte cdb form (condition elided) */
3343 if (unlikely(have_dif_prot && check_prot)) {
3344 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3346 mk_sense_invalid_opcode(scp);
3347 return check_condition_result;
/* RDPROTECT == 0 on a formatted-with-PI device: warn, don't fail */
3349 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3350 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3351 (cmd[1] & 0xe0) == 0)
3352 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
/* one-shot short-transfer injection, armed via sdeb_inject_pending */
3355 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3356 atomic_read(&sdeb_inject_pending))) {
3358 atomic_set(&sdeb_inject_pending, 0);
3361 ret = check_device_access_params(scp, lba, num, false);
/* fake an unrecoverable medium error inside the configured LBA window */
3364 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3365 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3366 ((lba + num) > sdebug_medium_error_start))) {
3367 /* claim unrecoverable read error */
3368 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3369 /* set info field and valid bit for fixed descriptor */
3370 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3371 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3372 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3373 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3374 put_unaligned_be32(ret, scp->sense_buffer + 3);
3376 scsi_set_resid(scp, scsi_bufflen(scp));
3377 return check_condition_result;
3380 sdeb_read_lock(sip);
/* DIX active: verify PI; RDPROTECT=3 downgrades target CHECK CONDITION
 * (ABORTED COMMAND) to an initiator-side illegal_condition_result */
3383 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3384 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3385 case 1: /* Guard tag error */
3386 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3387 sdeb_read_unlock(sip);
3388 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3389 return check_condition_result;
3390 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3391 sdeb_read_unlock(sip);
3392 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3393 return illegal_condition_result;
3396 case 3: /* Reference tag error */
3397 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3398 sdeb_read_unlock(sip);
3399 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3400 return check_condition_result;
3401 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3402 sdeb_read_unlock(sip);
3403 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3404 return illegal_condition_result;
3410 ret = do_device_access(sip, scp, 0, lba, num, false);
3411 sdeb_read_unlock(sip);
3412 if (unlikely(ret == -1))
3413 return DID_ERROR << 16;
/* report any shortfall between requested and transferred bytes */
3415 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
/* one-shot DIF/DIX error injection after a successful transfer */
3417 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3418 atomic_read(&sdeb_inject_pending))) {
3419 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3420 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3421 atomic_set(&sdeb_inject_pending, 0);
3422 return check_condition_result;
3423 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3424 /* Logical block guard check failed */
3425 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3426 atomic_set(&sdeb_inject_pending, 0);
3427 return illegal_condition_result;
3428 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3429 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3430 atomic_set(&sdeb_inject_pending, 0);
3431 return illegal_condition_result;
/*
 * Verify protection information carried with a WRITE: walk the protection
 * sgl and the data sgl in lockstep (one PI tuple per sdebug_sector_size
 * bytes of data), dif_verify()-ing each sector unless WRPROTECT == 3,
 * then store the PI tuples via dif_copy_prot(). Returns 0 on success; a
 * dif_verify() failure exits through the cleanup path at the end (the
 * goto and return statements are elided from this excerpt).
 */
3437 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3438 unsigned int sectors, u32 ei_lba)
3441 struct t10_pi_tuple *sdt;
3443 sector_t sector = start_sec;
3446 struct sg_mapping_iter diter;
3447 struct sg_mapping_iter piter;
3449 BUG_ON(scsi_sg_count(SCpnt) == 0);
3450 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3452 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3453 scsi_prot_sg_count(SCpnt),
3454 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3455 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3456 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3458 /* For each protection page */
3459 while (sg_miter_next(&piter)) {
3461 if (WARN_ON(!sg_miter_next(&diter))) {
3466 for (ppage_offset = 0; ppage_offset < piter.length;
3467 ppage_offset += sizeof(struct t10_pi_tuple)) {
3468 /* If we're at the end of the current
3469 * data page advance to the next one
3471 if (dpage_offset >= diter.length) {
3472 if (WARN_ON(!sg_miter_next(&diter))) {
3479 sdt = piter.addr + ppage_offset;
3480 daddr = diter.addr + dpage_offset;
3482 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3483 ret = dif_verify(sdt, daddr, sector, ei_lba);
3490 dpage_offset += sdebug_sector_size;
3492 diter.consumed = dpage_offset;
3493 sg_miter_stop(&diter);
3495 sg_miter_stop(&piter);
/* all sectors verified: persist the PI into the store */
3497 dif_copy_prot(SCpnt, start_sec, sectors, false);
/* error path: stop both iterators before returning */
3504 sg_miter_stop(&diter);
3505 sg_miter_stop(&piter);
/*
 * Convert an LBA to its index in the provisioning bitmap, one bit per
 * unmap granule, accounting for the configured unmap alignment offset.
 */
3509 static unsigned long lba_to_map_index(sector_t lba)
3511 if (sdebug_unmap_alignment)
3512 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3513 sector_div(lba, sdebug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): the first LBA of the unmap granule with
 * bitmap index @index, shifted back by the alignment offset.
 */
3517 static sector_t map_index_to_lba(unsigned long index)
3519 sector_t lba = index * sdebug_unmap_granularity;
3521 if (sdebug_unmap_alignment)
3522 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
/*
 * Return whether @lba lies in a mapped granule, and set *num (via the
 * elided tail of this function) to the length of the run of consecutively
 * same-state blocks starting at @lba, clamped to the store size.
 */
3526 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3530 unsigned int mapped;
3531 unsigned long index;
3534 index = lba_to_map_index(lba);
3535 mapped = test_bit(index, sip->map_storep);
/* find where the current mapped/unmapped run ends */
3538 next = find_next_zero_bit(sip->map_storep, map_size, index);
3540 next = find_next_bit(sip->map_storep, map_size, index);
3542 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark every provisioning-map granule touched by [lba, lba+len) as
 * mapped, advancing one granule per loop iteration.
 */
3547 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3550 sector_t end = lba + len;
3553 unsigned long index = lba_to_map_index(lba);
3555 if (index < map_size)
3556 set_bit(index, sip->map_storep);
3558 lba = map_index_to_lba(index + 1);
/*
 * Unmap the granules fully contained in [lba, lba+len): clear the map
 * bit, and -- when LBPRZ is in effect -- scrub the backing data to zeroes
 * (or 0xff for LBPRZ=2) and reset the corresponding PI tuples to 0xff.
 * Granules only partially covered by the range are left mapped.
 */
3562 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3565 sector_t end = lba + len;
3566 u8 *fsp = sip->storep;
3569 unsigned long index = lba_to_map_index(lba);
/* only whole, granule-aligned chunks inside the range are unmapped */
3571 if (lba == map_index_to_lba(index) &&
3572 lba + sdebug_unmap_granularity <= end &&
3574 clear_bit(index, sip->map_storep);
3575 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3576 memset(fsp + lba * sdebug_sector_size,
3577 (sdebug_lbprz & 1) ? 0 : 0xff,
3578 sdebug_sector_size *
3579 sdebug_unmap_granularity);
3581 if (sip->dif_storep) {
3582 memset(sip->dif_storep + lba, 0xff,
3583 sizeof(*sip->dif_storep) *
3584 sdebug_unmap_granularity);
3587 lba = map_index_to_lba(index + 1);
/*
 * Service a WRITE-class command: decode LBA/length from the cdb (switch
 * header elided), verify protection information when DIX is active, copy
 * the payload into the fake store under the write lock, update the
 * provisioning map and any ZBC write pointer, and finally apply one-shot
 * error injection. Mirrors resp_read_dt0() for the write direction.
 */
3591 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3598 struct sdeb_store_info *sip = devip2sip(devip, true);
3599 u8 *cmd = scp->cmnd;
/* WRITE(16)-style: 8-byte LBA at offset 2, 4-byte length at 10 */
3604 lba = get_unaligned_be64(cmd + 2);
3605 num = get_unaligned_be32(cmd + 10);
/* WRITE(10)-style: 4-byte LBA at offset 2, 2-byte length at 7 */
3610 lba = get_unaligned_be32(cmd + 2);
3611 num = get_unaligned_be16(cmd + 7);
/* WRITE(6): 21-bit LBA in bytes 1..3; length 0 means 256 */
3616 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3617 (u32)(cmd[1] & 0x1f) << 16;
3618 num = (0 == cmd[4]) ? 256 : cmd[4];
3623 lba = get_unaligned_be32(cmd + 2);
3624 num = get_unaligned_be32(cmd + 6);
3627 case 0x53: /* XDWRITEREAD(10) */
3629 lba = get_unaligned_be32(cmd + 2);
3630 num = get_unaligned_be16(cmd + 7);
3633 default: /* assume WRITE(32) */
3634 lba = get_unaligned_be64(cmd + 12);
3635 ei_lba = get_unaligned_be32(cmd + 20);
3636 num = get_unaligned_be32(cmd + 28);
/* DIF sanity: type 2 requires the 32-byte cdb form (condition elided) */
3640 if (unlikely(have_dif_prot && check_prot)) {
3641 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3643 mk_sense_invalid_opcode(scp);
3644 return check_condition_result;
/* WRPROTECT == 0 on a formatted-with-PI device: warn, don't fail */
3646 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3647 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3648 (cmd[1] & 0xe0) == 0)
3649 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3653 sdeb_write_lock(sip);
3654 ret = check_device_access_params(scp, lba, num, true);
3656 sdeb_write_unlock(sip);
/* DIX active: verify incoming PI; checked-flag errors are reported to
 * the initiator as illegal_condition_result, else CHECK CONDITION */
3661 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3662 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3663 case 1: /* Guard tag error */
3664 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3665 sdeb_write_unlock(sip);
3666 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3667 return illegal_condition_result;
3668 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3669 sdeb_write_unlock(sip);
3670 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3671 return check_condition_result;
3674 case 3: /* Reference tag error */
3675 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3676 sdeb_write_unlock(sip);
3677 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3678 return illegal_condition_result;
3679 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3680 sdeb_write_unlock(sip);
3681 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3682 return check_condition_result;
3688 ret = do_device_access(sip, scp, 0, lba, num, true);
3689 if (unlikely(scsi_debug_lbp()))
3690 map_region(sip, lba, num);
3691 /* If ZBC zone then bump its write pointer */
3692 if (sdebug_dev_is_zoned(devip))
3693 zbc_inc_wp(devip, lba, num);
3694 sdeb_write_unlock(sip);
3695 if (unlikely(-1 == ret))
3696 return DID_ERROR << 16;
3697 else if (unlikely(sdebug_verbose &&
3698 (ret < (num * sdebug_sector_size))))
3699 sdev_printk(KERN_INFO, scp->device,
3700 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3701 my_name, num * sdebug_sector_size, ret);
/* one-shot DIF/DIX error injection after a successful transfer */
3703 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3704 atomic_read(&sdeb_inject_pending))) {
3705 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3706 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3707 atomic_set(&sdeb_inject_pending, 0);
3708 return check_condition_result;
3709 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3710 /* Logical block guard check failed */
3711 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3712 atomic_set(&sdeb_inject_pending, 0);
3713 return illegal_condition_result;
3714 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3715 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3716 atomic_set(&sdeb_inject_pending, 0);
3717 return illegal_condition_result;
3724  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3725  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
/*
 * Service WRITE SCATTERED: the data-out buffer begins with @lbdof blocks
 * holding a header plus @num_lrd LBA-range descriptors (32 bytes each),
 * followed by the write data for each descriptor in order. Each range is
 * validated, optionally PI-verified, and written to the store; the sum of
 * all descriptor block counts must not exceed the cdb's Buffer Transfer
 * Length (@bt_len, in logical blocks).
 */
3727 static int resp_write_scat(struct scsi_cmnd *scp,
3728 struct sdebug_dev_info *devip)
3730 u8 *cmd = scp->cmnd;
3733 struct sdeb_store_info *sip = devip2sip(devip, true);
3735 u16 lbdof, num_lrd, k;
3736 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3737 u32 lb_size = sdebug_sector_size;
3742 static const u32 lrd_size = 32; /* + parameter list header size */
/* WRITE SCATTERED(32) uses the variable-length cdb layout */
3744 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3746 wrprotect = (cmd[10] >> 5) & 0x7;
3747 lbdof = get_unaligned_be16(cmd + 12);
3748 num_lrd = get_unaligned_be16(cmd + 16);
3749 bt_len = get_unaligned_be32(cmd + 28);
3750 } else { /* that leaves WRITE SCATTERED(16) */
3752 wrprotect = (cmd[2] >> 5) & 0x7;
3753 lbdof = get_unaligned_be16(cmd + 4);
3754 num_lrd = get_unaligned_be16(cmd + 8);
3755 bt_len = get_unaligned_be32(cmd + 10);
3756 if (unlikely(have_dif_prot)) {
3757 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3759 mk_sense_invalid_opcode(scp);
3760 return illegal_condition_result;
3762 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3763 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3765 sdev_printk(KERN_ERR, scp->device,
3766 "Unprotected WR to DIF device\n");
3769 if ((num_lrd == 0) || (bt_len == 0))
3770 return 0; /* T10 says these do-nothings are not errors */
3773 sdev_printk(KERN_INFO, scp->device,
3774 "%s: %s: LB Data Offset field bad\n",
3776 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3777 return illegal_condition_result;
/* descriptor area (header + num_lrd descriptors) must fit in lbdof blocks */
3779 lbdof_blen = lbdof * lb_size;
3780 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3782 sdev_printk(KERN_INFO, scp->device,
3783 "%s: %s: LBA range descriptors don't fit\n",
3785 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3786 return illegal_condition_result;
3788 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3790 return SCSI_MLQUEUE_HOST_BUSY;
3792 sdev_printk(KERN_INFO, scp->device,
3793 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3794 my_name, __func__, lbdof_blen);
3795 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3797 ret = DID_ERROR << 16;
3801 sdeb_write_lock(sip);
3802 sg_off = lbdof_blen;
3803 /* Spec says Buffer xfer Length field in number of LBs in dout */
/* walk the LBA range descriptors; first one is after the 32-byte header */
3805 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3806 lba = get_unaligned_be64(up + 0);
3807 num = get_unaligned_be32(up + 8);
3809 sdev_printk(KERN_INFO, scp->device,
3810 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3811 my_name, __func__, k, lba, num, sg_off);
3814 ret = check_device_access_params(scp, lba, num, true);
3816 goto err_out_unlock;
3817 num_by = num * lb_size;
3818 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
/* running total of blocks must stay within Buffer Transfer Length */
3820 if ((cum_lb + num) > bt_len) {
3822 sdev_printk(KERN_INFO, scp->device,
3823 "%s: %s: sum of blocks > data provided\n",
3825 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3827 ret = illegal_condition_result;
3828 goto err_out_unlock;
3832 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3833 int prot_ret = prot_verify_write(scp, lba, num,
3837 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3839 ret = illegal_condition_result;
3840 goto err_out_unlock;
3844 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3845 /* If ZBC zone then bump its write pointer */
3846 if (sdebug_dev_is_zoned(devip))
3847 zbc_inc_wp(devip, lba, num);
3848 if (unlikely(scsi_debug_lbp()))
3849 map_region(sip, lba, num);
3850 if (unlikely(-1 == ret)) {
3851 ret = DID_ERROR << 16;
3852 goto err_out_unlock;
3853 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3854 sdev_printk(KERN_INFO, scp->device,
3855 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3856 my_name, num_by, ret);
/* one-shot DIF/DIX error injection, same scheme as resp_write_dt0() */
3858 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3859 atomic_read(&sdeb_inject_pending))) {
3860 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3861 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3862 atomic_set(&sdeb_inject_pending, 0);
3863 ret = check_condition_result;
3864 goto err_out_unlock;
3865 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3866 /* Logical block guard check failed */
3867 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3868 atomic_set(&sdeb_inject_pending, 0);
3869 ret = illegal_condition_result;
3870 goto err_out_unlock;
3871 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3872 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3873 atomic_set(&sdeb_inject_pending, 0);
3874 ret = illegal_condition_result;
3875 goto err_out_unlock;
3883 sdeb_write_unlock(sip);
/*
 * Common worker for WRITE SAME(10/16). With @unmap set (and LBP active)
 * the range is unmapped instead of written. Otherwise one logical block
 * is obtained -- zeroes when @ndob (no data-out buffer) is set, else
 * fetched from the data-out buffer -- and replicated across the remaining
 * num-1 blocks of the (circular) store.
 */
3889 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3890 u32 ei_lba, bool unmap, bool ndob)
3892 struct scsi_device *sdp = scp->device;
3893 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3894 unsigned long long i;
3896 u32 lb_size = sdebug_sector_size;
3898 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3899 scp->device->hostdata, true);
3903 sdeb_write_lock(sip);
3905 ret = check_device_access_params(scp, lba, num, true);
3907 sdeb_write_unlock(sip);
/* UNMAP bit set and logical block provisioning enabled: just deallocate */
3911 if (unmap && scsi_debug_lbp()) {
3912 unmap_region(sip, lba, num);
3916 block = do_div(lbaa, sdebug_store_sectors);
3917 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3919 fs1p = fsp + (block * lb_size);
3921 memset(fs1p, 0, lb_size);
3924 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3927 sdeb_write_unlock(sip);
3928 return DID_ERROR << 16;
3929 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3930 sdev_printk(KERN_INFO, scp->device,
3931 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3932 my_name, "write same", lb_size, ret);
3934 /* Copy first sector to remaining blocks */
3935 for (i = 1 ; i < num ; i++) {
3937 block = do_div(lbaa, sdebug_store_sectors);
/* memmove: source and destination may overlap when the store wraps */
3938 memmove(fsp + (block * lb_size), fs1p, lb_size);
3940 if (scsi_debug_lbp())
3941 map_region(sip, lba, num);
3942 /* If ZBC zone then bump its write pointer */
3943 if (sdebug_dev_is_zoned(devip))
3944 zbc_inc_wp(devip, lba, num);
3946 sdeb_write_unlock(sip);
/*
 * WRITE SAME(10): decode the 10-byte cdb and delegate to
 * resp_write_same(). An UNMAP request is rejected when lbpws10 support is
 * disabled; @num is bounded by sdebug_write_same_length.
 */
3951 static int resp_write_same_10(struct scsi_cmnd *scp,
3952 struct sdebug_dev_info *devip)
3954 u8 *cmd = scp->cmnd;
3961 if (sdebug_lbpws10 == 0) {
3962 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3963 return check_condition_result;
3967 lba = get_unaligned_be32(cmd + 2);
3968 num = get_unaligned_be16(cmd + 7);
3969 if (num > sdebug_write_same_length) {
3970 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3971 return check_condition_result;
/* WRITE SAME(10) has no NDOB bit, so ndob is always false here */
3973 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
/*
 * WRITE SAME(16): decode the 16-byte cdb (including the NDOB bit) and
 * delegate to resp_write_same(). UNMAP is rejected when lbpws support is
 * disabled; @num is bounded by sdebug_write_same_length.
 */
3976 static int resp_write_same_16(struct scsi_cmnd *scp,
3977 struct sdebug_dev_info *devip)
3979 u8 *cmd = scp->cmnd;
3986 if (cmd[1] & 0x8) { /* UNMAP */
3987 if (sdebug_lbpws == 0) {
3988 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3989 return check_condition_result;
3993 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3995 lba = get_unaligned_be64(cmd + 2);
3996 num = get_unaligned_be32(cmd + 10);
3997 if (num > sdebug_write_same_length) {
3998 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3999 return check_condition_result;
4001 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4004 /* Note the mode field is in the same position as the (lower) service action
4005  * field. For the Report supported operation codes command, SPC-4 suggests
4006  * each mode of this command should be reported separately; for future. */
/*
 * WRITE BUFFER: the data itself is discarded; the only simulated effect
 * is raising the unit attentions that a real microcode download would
 * cause -- on this LU alone for modes 4/5, on the target's LUs for
 * modes 6/7. Other modes are accepted as no-ops.
 */
4007 static int resp_write_buffer(struct scsi_cmnd *scp,
4008 struct sdebug_dev_info *devip)
4010 u8 *cmd = scp->cmnd;
4011 struct scsi_device *sdp = scp->device;
4012 struct sdebug_dev_info *dp;
4015 mode = cmd[1] & 0x1f;
4017 case 0x4: /* download microcode (MC) and activate (ACT) */
4018 /* set UAs on this device only */
4019 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4020 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4022 case 0x5: /* download MC, save and ACT */
4023 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4025 case 0x6: /* download MC with offsets and ACT */
4026 /* set UAs on most devices (LUs) in this target */
4027 list_for_each_entry(dp,
4028 &devip->sdbg_host->dev_info_list,
4030 if (dp->target == sdp->id) {
4031 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4033 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4037 case 0x7: /* download MC with offsets, save, and ACT */
4038 /* set UA on all devices (LUs) in this target */
4039 list_for_each_entry(dp,
4040 &devip->sdbg_host->dev_info_list,
4042 if (dp->target == sdp->id)
4043 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4047 /* do nothing for this command for other mode values */
/*
 * COMPARE AND WRITE(0x89): the data-out buffer carries @num blocks of
 * compare data followed by @num blocks of write data (dnum = 2 * num,
 * presumably set on an elided line). The store is updated only when the
 * compare half matches; a miscompare yields MISCOMPARE sense.
 */
4053 static int resp_comp_write(struct scsi_cmnd *scp,
4054 struct sdebug_dev_info *devip)
4056 u8 *cmd = scp->cmnd;
4058 struct sdeb_store_info *sip = devip2sip(devip, true);
4061 u32 lb_size = sdebug_sector_size;
4066 lba = get_unaligned_be64(cmd + 2);
4067 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4069 return 0; /* degenerate case, not an error */
4070 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4072 mk_sense_invalid_opcode(scp);
4073 return check_condition_result;
4075 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4076 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4077 (cmd[1] & 0xe0) == 0)
4078 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4080 ret = check_device_access_params(scp, lba, num, false);
4084 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4086 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4088 return check_condition_result;
4091 sdeb_write_lock(sip);
/* pull compare-data + write-data (dnum blocks) from the dout buffer */
4093 ret = do_dout_fetch(scp, dnum, arr);
4095 retval = DID_ERROR << 16;
4097 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4098 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4099 "indicated=%u, IO sent=%d bytes\n", my_name,
4100 dnum * lb_size, ret);
4101 if (!comp_write_worker(sip, lba, num, arr, false)) {
4102 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4103 retval = check_condition_result;
4106 if (scsi_debug_lbp())
4107 map_region(sip, lba, num);
4109 sdeb_write_unlock(sip);
/* One UNMAP parameter-data LBA range descriptor; the field definitions
 * (lba, blocks -- see their use in resp_unmap()) are elided from this
 * excerpt. */
4114 struct unmap_block_desc {
/*
 * UNMAP: copy the parameter list from the command buffer, validate the
 * header against the cdb's parameter list length, then unmap each of the
 * LBA range descriptors (16 bytes each, starting at byte 8) under the
 * write lock. When LBP is disabled the command is accepted as a no-op.
 */
4120 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4123 struct unmap_block_desc *desc;
4124 struct sdeb_store_info *sip = devip2sip(devip, true);
4125 unsigned int i, payload_len, descriptors;
4128 if (!scsi_debug_lbp())
4129 return 0; /* fib and say its done */
4130 payload_len = get_unaligned_be16(scp->cmnd + 7);
4131 BUG_ON(scsi_bufflen(scp) != payload_len);
/* 8-byte parameter list header, then 16 bytes per descriptor */
4133 descriptors = (payload_len - 8) / 16;
4134 if (descriptors > sdebug_unmap_max_desc) {
4135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4136 return check_condition_result;
4139 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4141 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4143 return check_condition_result;
4146 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* header lengths must agree with the cdb-derived values */
4148 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4149 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4151 desc = (void *)&buf[8];
4153 sdeb_write_lock(sip);
4155 for (i = 0 ; i < descriptors ; i++) {
4156 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4157 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4159 ret = check_device_access_params(scp, lba, num, true);
4163 unmap_region(sip, lba, num);
4169 sdeb_write_unlock(sip);
4175 #define SDEBUG_GET_LBA_STATUS_LEN 32
/*
 * GET LBA STATUS(16): report the provisioning state (mapped/deallocated)
 * of the run of blocks starting at the cdb's LBA. With LBP off, the
 * whole remaining capacity is reported as one mapped extent. The response
 * is a single 16-byte LBA status descriptor after an 8-byte header.
 */
4177 static int resp_get_lba_status(struct scsi_cmnd *scp,
4178 struct sdebug_dev_info *devip)
4180 u8 *cmd = scp->cmnd;
4182 u32 alloc_len, mapped, num;
4184 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4186 lba = get_unaligned_be64(cmd + 2);
4187 alloc_len = get_unaligned_be32(cmd + 10);
4192 ret = check_device_access_params(scp, lba, 1, false);
4196 if (scsi_debug_lbp()) {
4197 struct sdeb_store_info *sip = devip2sip(devip, true);
/* map_state() also sets @num to the same-state run length */
4199 mapped = map_state(sip, lba, &num);
4202 /* following just in case virtual_gb changed */
4203 sdebug_capacity = get_sdebug_capacity();
4204 if (sdebug_capacity - lba <= 0xffffffff)
4205 num = sdebug_capacity - lba;
4210 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4211 put_unaligned_be32(20, arr); /* Parameter Data Length */
4212 put_unaligned_be64(lba, arr + 8); /* LBA */
4213 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4214 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4216 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
/*
 * SYNCHRONIZE CACHE(10/16): range-check the cdb's LBA/length and clear
 * the write_since_sync flag. Completion is immediate (SDEG_RES_IMMED_MASK
 * in the result) when nothing was written since the last sync or when the
 * IMMED bit is set; otherwise the response is delayed.
 */
4219 static int resp_sync_cache(struct scsi_cmnd *scp,
4220 struct sdebug_dev_info *devip)
4225 u8 *cmd = scp->cmnd;
4227 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4228 lba = get_unaligned_be32(cmd + 2);
4229 num_blocks = get_unaligned_be16(cmd + 7);
4230 } else { /* SYNCHRONIZE_CACHE(16) */
4231 lba = get_unaligned_be64(cmd + 2);
4232 num_blocks = get_unaligned_be32(cmd + 10);
4234 if (lba + num_blocks > sdebug_capacity) {
4235 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4236 return check_condition_result;
4238 if (!write_since_sync || (cmd[1] & 0x2))
4239 res = SDEG_RES_IMMED_MASK;
4240 else /* delay if write_since_sync and IMMED clear */
4241 write_since_sync = false;
4246  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4247  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4248  * a GOOD status otherwise. Model a disk with a big cache and yield
4249  * CONDITION MET. Actually tries to bring range in main memory into the
4250  * cache associated with the CPU(s).
4252 static int resp_pre_fetch(struct scsi_cmnd *scp,
4253 struct sdebug_dev_info *devip)
4257 u64 block, rest = 0;
4259 u8 *cmd = scp->cmnd;
4260 struct sdeb_store_info *sip = devip2sip(devip, true);
4261 u8 *fsp = sip->storep;
4263 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4264 lba = get_unaligned_be32(cmd + 2);
4265 nblks = get_unaligned_be16(cmd + 7);
4266 } else { /* PRE-FETCH(16) */
4267 lba = get_unaligned_be64(cmd + 2);
4268 nblks = get_unaligned_be32(cmd + 10);
4270 if (lba + nblks > sdebug_capacity) {
4271 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4272 return check_condition_result;
4276 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
/* @rest: blocks wrapping past the end of the circular store */
4277 block = do_div(lba, sdebug_store_sectors);
4278 if (block + nblks > sdebug_store_sectors)
4279 rest = block + nblks - sdebug_store_sectors;
4281 /* Try to bring the PRE-FETCH range into CPU's cache */
4282 sdeb_read_lock(sip);
4283 prefetch_range(fsp + (sdebug_sector_size * block),
4284 (nblks - rest) * sdebug_sector_size);
4286 prefetch_range(fsp, rest * sdebug_sector_size);
4287 sdeb_read_unlock(sip);
4290 res = SDEG_RES_IMMED_MASK;
4291 return res | condition_met_result;
4294 #define RL_BUCKET_ELEMS 8
4296 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4297 * (W-LUN), the normal Linux scanning logic does not associate it with a
4298 * device (e.g. /dev/sg7). The following magic will make that association:
4299 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4300 * where <n> is a host number. If there are multiple targets in a host then
4301 * the above will associate a W-LUN to each target. To only get a W-LUN
4302 * for target 2, then use "echo '- 2 49409' > scan" .
/*
 * REPORT LUNS: build the LUN list selected by the cdb's SELECT REPORT
 * field (0 = normal LUNs, 1 = W-LUNs only, 2 = both) and stream it to the
 * command buffer in buckets of RL_BUCKET_ELEMS 8-byte LUN entries, the
 * first bucket prefixed by the 8-byte response header.
 */
4304 static int resp_report_luns(struct scsi_cmnd *scp,
4305 struct sdebug_dev_info *devip)
4307 unsigned char *cmd = scp->cmnd;
4308 unsigned int alloc_len;
4309 unsigned char select_report;
4311 struct scsi_lun *lun_p;
4312 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4313 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4314 unsigned int wlun_cnt; /* report luns W-LUN count */
4315 unsigned int tlun_cnt; /* total LUN count */
4316 unsigned int rlen; /* response length (in bytes) */
4318 unsigned int off_rsp = 0;
4319 const int sz_lun = sizeof(struct scsi_lun);
4321 clear_luns_changed_on_target(devip);
4323 select_report = cmd[2];
4324 alloc_len = get_unaligned_be32(cmd + 6);
/* SPC requires an allocation length of at least 4 bytes */
4326 if (alloc_len < 4) {
4327 pr_err("alloc len too small %d\n", alloc_len);
4328 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4329 return check_condition_result;
4332 switch (select_report) {
4333 case 0: /* all LUNs apart from W-LUNs */
4334 lun_cnt = sdebug_max_luns;
4337 case 1: /* only W-LUNs */
4341 case 2: /* all LUNs */
4342 lun_cnt = sdebug_max_luns;
4345 case 0x10: /* only administrative LUs */
4346 case 0x11: /* see SPC-5 */
4347 case 0x12: /* only subsiduary LUs owned by referenced LU */
4349 pr_debug("select report invalid %d\n", select_report);
4350 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4351 return check_condition_result;
4354 if (sdebug_no_lun_0 && (lun_cnt > 0))
4357 tlun_cnt = lun_cnt + wlun_cnt;
4358 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4359 scsi_set_resid(scp, scsi_bufflen(scp));
4360 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4361 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4363 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4364 lun = sdebug_no_lun_0 ? 1 : 0;
4365 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4366 memset(arr, 0, sizeof(arr));
4367 lun_p = (struct scsi_lun *)&arr[0];
/* first bucket: 8-byte header (length + reserved) occupies slot 0 */
4369 put_unaligned_be32(rlen, &arr[0]);
4373 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4374 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4376 int_to_scsilun(lun++, lun_p);
/* flat addressing method bit for LUNs above 1, when configured */
4377 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4378 lun_p->scsi_lun[0] |= 0x40;
4380 if (j < RL_BUCKET_ELEMS)
4383 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
/* append the REPORT LUNS well-known LUN when requested */
4389 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4393 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
/*
 * VERIFY(10/16): BYTCHK=0 is claimed successful without reading;
 * BYTCHK=2 is rejected; BYTCHK=3 fetches one block from the data-out
 * buffer and compares it repeatedly against every block in the range
 * (the single block is replicated across the scratch buffer first);
 * BYTCHK=1 fetches vnum blocks and compares them 1:1. The comparison is
 * done by comp_write_worker() in compare-only mode.
 */
4397 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4399 bool is_bytchk3 = false;
4402 u32 vnum, a_num, off;
4403 const u32 lb_size = sdebug_sector_size;
4406 u8 *cmd = scp->cmnd;
4407 struct sdeb_store_info *sip = devip2sip(devip, true);
4409 bytchk = (cmd[1] >> 1) & 0x3;
4411 return 0; /* always claim internal verify okay */
4412 } else if (bytchk == 2) {
4413 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4414 return check_condition_result;
4415 } else if (bytchk == 3) {
4416 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4420 lba = get_unaligned_be64(cmd + 2);
4421 vnum = get_unaligned_be32(cmd + 10);
4423 case VERIFY: /* is VERIFY(10) */
4424 lba = get_unaligned_be32(cmd + 2);
4425 vnum = get_unaligned_be16(cmd + 7);
4428 mk_sense_invalid_opcode(scp);
4429 return check_condition_result;
4432 return 0; /* not an error */
/* with BYTCHK=3 only one block comes over the wire */
4433 a_num = is_bytchk3 ? 1 : vnum;
4434 /* Treat following check like one for read (i.e. no write) access */
4435 ret = check_device_access_params(scp, lba, a_num, false);
4439 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4441 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4443 return check_condition_result;
4445 /* Not changing store, so only need read access */
4446 sdeb_read_lock(sip);
4448 ret = do_dout_fetch(scp, a_num, arr);
4450 ret = DID_ERROR << 16;
4452 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4453 sdev_printk(KERN_INFO, scp->device,
4454 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4455 my_name, __func__, a_num * lb_size, ret);
/* BYTCHK=3: replicate the single received block across all vnum slots */
4458 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4459 memcpy(arr + off, arr, lb_size);
4462 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4463 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4464 ret = check_condition_result;
4468 sdeb_read_unlock(sip);
4473 #define RZONES_DESC_HD 64
4475 /* Report zones depending on start LBA and reporting options */
/*
 * Respond to the ZBC REPORT ZONES command: walk the zone array starting at
 * the requested LBA, filter by reporting options (cmd[14] bits 5:0) and fill
 * a 64-byte header plus 64-byte descriptors per matching zone.
 * NOTE(review): extraction gaps -- switch header, several case labels,
 * braces and some statements are missing from this view.
 */
4476 static int resp_report_zones(struct scsi_cmnd *scp,
4477 struct sdebug_dev_info *devip)
4479 unsigned int rep_max_zones, nrz = 0;
4481 u32 alloc_len, rep_opts, rep_len;
4484 u8 *arr = NULL, *desc;
4485 u8 *cmd = scp->cmnd;
4486 struct sdeb_zone_state *zsp = NULL;
4487 struct sdeb_store_info *sip = devip2sip(devip, false);
/* REPORT ZONES only valid on a zoned (ZBC) emulated device */
4489 if (!sdebug_dev_is_zoned(devip)) {
4490 mk_sense_invalid_opcode(scp);
4491 return check_condition_result;
4493 zs_lba = get_unaligned_be64(cmd + 2);
4494 alloc_len = get_unaligned_be32(cmd + 10);
4496 return 0; /* not an error */
4497 rep_opts = cmd[14] & 0x3f;
4498 partial = cmd[14] & 0x80;
4500 if (zs_lba >= sdebug_capacity) {
4501 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4502 return check_condition_result;
/* number of descriptors that fit after the 64-byte header */
4505 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD)
4507 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4509 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4511 return check_condition_result;
4514 sdeb_read_lock(sip);
/* iterate zone by zone; each step advances to the next zone start */
4517 for (lba = zs_lba; lba < sdebug_capacity;
4518 lba = zsp->z_start + zsp->z_size) {
4519 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4521 zsp = zbc_zone(devip, lba);
/* reporting-option filters: skip zones not matching rep_opts */
4528 if (zsp->z_cond != ZC1_EMPTY)
4532 /* Implicit open zones */
4533 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4537 /* Explicit open zones */
4538 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4543 if (zsp->z_cond != ZC4_CLOSED)
4548 if (zsp->z_cond != ZC5_FULL)
4555 * Read-only, offline, reset WP recommended are
4556 * not emulated: no zones to report;
4560 /* non-seq-resource set */
4561 if (!zsp->z_non_seq_resource)
4565 /* All zones except gap zones. */
4566 if (zbc_zone_is_gap(zsp))
4570 /* Not write pointer (conventional) zones */
4571 if (zbc_zone_is_seq(zsp))
/* unknown reporting option: reject */
4575 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4576 INVALID_FIELD_IN_CDB, 0);
4577 ret = check_condition_result;
4581 if (nrz < rep_max_zones) {
4582 /* Fill zone descriptor */
4583 desc[0] = zsp->z_type;
4584 desc[1] = zsp->z_cond << 4;
4585 if (zsp->z_non_seq_resource)
4587 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4588 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4589 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
/* PARTIAL bit: stop counting once the buffer is full */
4593 if (partial && nrz >= rep_max_zones)
4600 /* Zone list length. */
4601 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4603 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4604 /* Zone starting LBA granularity. */
4605 if (devip->zcap < devip->zsize)
4606 put_unaligned_be64(devip->zsize, arr + 16);
4608 rep_len = (unsigned long)desc - (unsigned long)arr;
4609 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4612 sdeb_read_unlock(sip);
4617 /* Logic transplanted from tcmu-runner, file_zbc.c */
4618 static void zbc_open_all(struct sdebug_dev_info *devip)
4620 struct sdeb_zone_state *zsp = &devip->zstate[0];
4623 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4624 if (zsp->z_cond == ZC4_CLOSED)
4625 zbc_open_zone(devip, &devip->zstate[i], true);
/*
 * Respond to the ZBC OPEN ZONE command: explicitly open either all closed
 * zones (ALL bit, cmd[14] bit 0) or the single zone starting at the LBA in
 * the CDB.  NOTE(review): extraction gaps -- braces, some declarations and
 * goto/cleanup lines are missing from this view.
 */
4629 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4633 enum sdebug_z_cond zc;
4634 u8 *cmd = scp->cmnd;
4635 struct sdeb_zone_state *zsp;
4636 bool all = cmd[14] & 0x01;
4637 struct sdeb_store_info *sip = devip2sip(devip, false);
4639 if (!sdebug_dev_is_zoned(devip)) {
4640 mk_sense_invalid_opcode(scp);
4641 return check_condition_result;
/* zone state changes: exclusive access to the store */
4644 sdeb_write_lock(sip);
4647 /* Check if all closed zones can be open */
4648 if (devip->max_open &&
4649 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4650 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4652 res = check_condition_result;
4655 /* Open all closed zones */
4656 zbc_open_all(devip);
4660 /* Open the specified zone */
4661 z_id = get_unaligned_be64(cmd + 2);
4662 if (z_id >= sdebug_capacity) {
4663 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4664 res = check_condition_result;
4668 zsp = zbc_zone(devip, z_id);
/* the CDB LBA must be exactly a zone start LBA */
4669 if (z_id != zsp->z_start) {
4670 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4671 res = check_condition_result;
4674 if (zbc_zone_is_conv(zsp)) {
4675 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4676 res = check_condition_result;
/* already explicitly open or full: nothing to do */
4681 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4684 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4685 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4687 res = check_condition_result;
4691 zbc_open_zone(devip, zsp, true);
4693 sdeb_write_unlock(sip);
4697 static void zbc_close_all(struct sdebug_dev_info *devip)
4701 for (i = 0; i < devip->nr_zones; i++)
4702 zbc_close_zone(devip, &devip->zstate[i]);
/*
 * Respond to the ZBC CLOSE ZONE command: close all zones (ALL bit) or the
 * single zone whose start LBA is given in the CDB.
 * NOTE(review): extraction gaps -- braces and goto/cleanup lines missing.
 */
4705 static int resp_close_zone(struct scsi_cmnd *scp,
4706 struct sdebug_dev_info *devip)
4710 u8 *cmd = scp->cmnd;
4711 struct sdeb_zone_state *zsp;
4712 bool all = cmd[14] & 0x01;
4713 struct sdeb_store_info *sip = devip2sip(devip, false);
4715 if (!sdebug_dev_is_zoned(devip)) {
4716 mk_sense_invalid_opcode(scp);
4717 return check_condition_result;
4720 sdeb_write_lock(sip);
4723 zbc_close_all(devip);
4727 /* Close specified zone */
4728 z_id = get_unaligned_be64(cmd + 2);
4729 if (z_id >= sdebug_capacity) {
4730 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4731 res = check_condition_result;
4735 zsp = zbc_zone(devip, z_id);
/* LBA must name a zone start; conventional zones cannot be closed */
4736 if (z_id != zsp->z_start) {
4737 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4738 res = check_condition_result;
4741 if (zbc_zone_is_conv(zsp)) {
4742 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4743 res = check_condition_result;
4747 zbc_close_zone(devip, zsp);
4749 sdeb_write_unlock(sip);
/*
 * Transition a zone to FULL (ZC5).  Allowed from closed, implicit-open and
 * explicit-open conditions, and from empty only when @empty is true.
 * NOTE(review): extraction gap -- the statement under the ZC4_CLOSED test
 * (closed-zone accounting, original line ~4763) is missing from this view.
 */
4753 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4754 struct sdeb_zone_state *zsp, bool empty)
4756 enum sdebug_z_cond zc = zsp->z_cond;
4758 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4759 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
/* open zones are closed first so open-zone accounting stays consistent */
4760 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4761 zbc_close_zone(devip, zsp);
4762 if (zsp->z_cond == ZC4_CLOSED)
/* a full zone's write pointer sits one past its last LBA */
4764 zsp->z_wp = zsp->z_start + zsp->z_size;
4765 zsp->z_cond = ZC5_FULL;
4769 static void zbc_finish_all(struct sdebug_dev_info *devip)
4773 for (i = 0; i < devip->nr_zones; i++)
4774 zbc_finish_zone(devip, &devip->zstate[i], false);
/*
 * Respond to the ZBC FINISH ZONE command: move all zones (ALL bit) or the
 * single addressed zone to the FULL condition.
 * NOTE(review): extraction gaps -- braces and goto/cleanup lines missing.
 */
4777 static int resp_finish_zone(struct scsi_cmnd *scp,
4778 struct sdebug_dev_info *devip)
4780 struct sdeb_zone_state *zsp;
4783 u8 *cmd = scp->cmnd;
4784 bool all = cmd[14] & 0x01;
4785 struct sdeb_store_info *sip = devip2sip(devip, false);
4787 if (!sdebug_dev_is_zoned(devip)) {
4788 mk_sense_invalid_opcode(scp);
4789 return check_condition_result;
4792 sdeb_write_lock(sip);
4795 zbc_finish_all(devip);
4799 /* Finish the specified zone */
4800 z_id = get_unaligned_be64(cmd + 2);
4801 if (z_id >= sdebug_capacity) {
4802 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4803 res = check_condition_result;
4807 zsp = zbc_zone(devip, z_id);
/* LBA must name a zone start; conventional zones cannot be finished */
4808 if (z_id != zsp->z_start) {
4809 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4810 res = check_condition_result;
4813 if (zbc_zone_is_conv(zsp)) {
4814 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4815 res = check_condition_result;
/* single-zone finish may complete an empty zone (empty=true) */
4819 zbc_finish_zone(devip, zsp, true);
4821 sdeb_write_unlock(sip);
/*
 * Reset a zone's write pointer: close it if open, zero its written data in
 * the backing store, and return it to the EMPTY condition.
 * NOTE(review): extraction gaps -- the zc assignment (~4834) and the
 * statement under the ZC4_CLOSED test (~4839) are missing from this view.
 */
4825 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4826 struct sdeb_zone_state *zsp)
4828 enum sdebug_z_cond zc;
4829 struct sdeb_store_info *sip = devip2sip(devip, false);
/* conventional/gap zones have no write pointer: nothing to reset */
4831 if (!zbc_zone_is_seq(zsp))
4835 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4836 zbc_close_zone(devip, zsp);
4838 if (zsp->z_cond == ZC4_CLOSED)
/* wipe only the portion written so far (z_start .. z_wp) */
4841 if (zsp->z_wp > zsp->z_start)
4842 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4843 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4845 zsp->z_non_seq_resource = false;
4846 zsp->z_wp = zsp->z_start;
4847 zsp->z_cond = ZC1_EMPTY;
4850 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4854 for (i = 0; i < devip->nr_zones; i++)
4855 zbc_rwp_zone(devip, &devip->zstate[i]);
/*
 * Respond to the ZBC RESET WRITE POINTER command: reset all zones (ALL bit)
 * or the single addressed zone.
 * NOTE(review): extraction gaps -- braces and goto/cleanup lines missing.
 */
4858 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4860 struct sdeb_zone_state *zsp;
4863 u8 *cmd = scp->cmnd;
4864 bool all = cmd[14] & 0x01;
4865 struct sdeb_store_info *sip = devip2sip(devip, false);
4867 if (!sdebug_dev_is_zoned(devip)) {
4868 mk_sense_invalid_opcode(scp);
4869 return check_condition_result;
4872 sdeb_write_lock(sip);
4879 z_id = get_unaligned_be64(cmd + 2);
4880 if (z_id >= sdebug_capacity) {
4881 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4882 res = check_condition_result;
4886 zsp = zbc_zone(devip, z_id);
/* LBA must name a zone start; conventional zones have no write pointer */
4887 if (z_id != zsp->z_start) {
4888 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4889 res = check_condition_result;
4892 if (zbc_zone_is_conv(zsp)) {
4893 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4894 res = check_condition_result;
4898 zbc_rwp_zone(devip, zsp);
4900 sdeb_write_unlock(sip);
/*
 * Map a command to its per-hardware-queue sdebug_queue via the block layer's
 * unique tag.  NOTE(review): extraction gaps -- the hwq declaration, a
 * verbose-only branch and the out-of-range fallback are missing from view.
 */
4904 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4907 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4909 hwq = blk_mq_unique_tag_to_hwq(tag);
4911 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
/* defensive: hwq must index into sdebug_q_arr[0..submit_queues) */
4912 if (WARN_ON_ONCE(hwq >= submit_queues))
4915 return sdebug_q_arr + hwq;
4918 static u32 get_tag(struct scsi_cmnd *cmnd)
4920 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4923 /* Queued (deferred) command completions converge here. */
/*
 * Common completion path for deferred (timer/workqueue) commands: validate
 * the queue slot, update accounting, release the slot and invoke scsi_done().
 * NOTE(review): extraction gaps -- declarations, several braces, early-return
 * paths and the retiring flag handling are missing from this view; do not
 * restructure without the full original.
 */
4924 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4926 bool aborted = sd_dp->aborted;
4929 unsigned long iflags;
4930 struct sdebug_queue *sqp;
4931 struct sdebug_queued_cmd *sqcp;
4932 struct scsi_cmnd *scp;
4933 struct sdebug_dev_info *devip;
/* consume the aborted flag so it does not leak into the next use */
4935 if (unlikely(aborted))
4936 sd_dp->aborted = false;
4937 qc_idx = sd_dp->qc_idx;
4938 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4939 if (sdebug_statistics) {
4940 atomic_inc(&sdebug_completions);
/* completion migrated off the issuing CPU: count the miss */
4941 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4942 atomic_inc(&sdebug_miss_cpus);
4944 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4945 pr_err("wild qc_idx=%d\n", qc_idx);
4948 spin_lock_irqsave(&sqp->qc_lock, iflags);
4949 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4950 sqcp = &sqp->qc_arr[qc_idx];
4952 if (unlikely(scp == NULL)) {
4953 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4954 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4955 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4958 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4960 atomic_dec(&devip->num_in_q);
4962 pr_err("devip=NULL\n");
4963 if (unlikely(atomic_read(&retired_max_queue) > 0))
/* free the queue slot for reuse */
4966 sqcp->a_cmnd = NULL;
4967 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4968 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4969 pr_err("Unexpected completion\n");
4973 if (unlikely(retiring)) { /* user has reduced max_queue */
4976 retval = atomic_read(&retired_max_queue);
4977 if (qc_idx >= retval) {
4978 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4979 pr_err("index %d too large\n", retval);
/* shrink retired_max_queue down to the highest in-use slot */
4982 k = find_last_bit(sqp->in_use_bm, retval);
4983 if ((k < sdebug_max_queue) || (k == retval))
4984 atomic_set(&retired_max_queue, 0);
4986 atomic_set(&retired_max_queue, k + 1);
4988 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4989 if (unlikely(aborted)) {
4991 pr_info("bypassing scsi_done() due to aborted cmd\n");
4994 scsi_done(scp); /* callback to mid level */
4997 /* When high resolution timer goes off this function is called. */
4998 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5000 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5002 sdebug_q_cmd_complete(sd_dp);
5003 return HRTIMER_NORESTART;
5006 /* When work queue schedules work, it calls this function. */
5007 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5009 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5011 sdebug_q_cmd_complete(sd_dp);
5014 static bool got_shared_uuid;
5015 static uuid_t shared_uuid;
/*
 * Size and initialize the zone state array for a zoned (ZBC) emulated
 * device: derive zone size/capacity from module parameters, lay out
 * conventional, sequential and (optionally) gap zones, and set max_open.
 * NOTE(review): extraction gaps -- braces, error returns, some shift and
 * else lines are missing from this view.
 */
5017 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5019 struct sdeb_zone_state *zsp;
5020 sector_t capacity = get_sdebug_capacity();
5021 sector_t conv_capacity;
5022 sector_t zstart = 0;
5026 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5027 * a zone size allowing for at least 4 zones on the device. Otherwise,
5028 * use the specified zone size checking that at least 2 zones can be
5029 * created for the device.
5031 if (!sdeb_zbc_zone_size_mb) {
5032 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5033 >> ilog2(sdebug_sector_size);
/* halve the zone size until at least 4 zones fit */
5034 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5036 if (devip->zsize < 2) {
5037 pr_err("Device capacity too small\n");
5041 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5042 pr_err("Zone size is not a power of 2\n");
5045 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5046 >> ilog2(sdebug_sector_size);
5047 if (devip->zsize >= capacity) {
5048 pr_err("Zone size too large for device capacity\n");
5053 devip->zsize_shift = ilog2(devip->zsize);
5054 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
/* zone capacity defaults to the full zone size */
5056 if (sdeb_zbc_zone_cap_mb == 0) {
5057 devip->zcap = devip->zsize;
5059 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5060 ilog2(sdebug_sector_size);
5061 if (devip->zcap > devip->zsize) {
5062 pr_err("Zone capacity too large\n");
5067 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5068 if (conv_capacity >= capacity) {
5069 pr_err("Number of conventional zones too large\n");
5072 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5073 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5075 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5077 /* Add gap zones if zone capacity is smaller than the zone size */
5078 if (devip->zcap < devip->zsize)
5079 devip->nr_zones += devip->nr_seq_zones;
5081 if (devip->zmodel == BLK_ZONED_HM) {
5082 /* zbc_max_open_zones can be 0, meaning "not reported" */
5083 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5084 devip->max_open = (devip->nr_zones - 1) / 2;
5086 devip->max_open = sdeb_zbc_max_open;
5089 devip->zstate = kcalloc(devip->nr_zones,
5090 sizeof(struct sdeb_zone_state), GFP_KERNEL);
/* walk the device, assigning each zone's type, condition and bounds */
5094 for (i = 0; i < devip->nr_zones; i++) {
5095 zsp = &devip->zstate[i];
5097 zsp->z_start = zstart;
5099 if (i < devip->nr_conv_zones) {
5100 zsp->z_type = ZBC_ZTYPE_CNV;
5101 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5102 zsp->z_wp = (sector_t)-1;
5104 min_t(u64, devip->zsize, capacity - zstart);
5105 } else if ((zstart & (devip->zsize - 1)) == 0) {
/* sequential zone: write-required (HM) or write-preferred (HA) */
5106 if (devip->zmodel == BLK_ZONED_HM)
5107 zsp->z_type = ZBC_ZTYPE_SWR;
5109 zsp->z_type = ZBC_ZTYPE_SWP;
5110 zsp->z_cond = ZC1_EMPTY;
5111 zsp->z_wp = zsp->z_start;
5113 min_t(u64, devip->zcap, capacity - zstart);
/* gap zone between a zone's capacity and its size */
5115 zsp->z_type = ZBC_ZTYPE_GAP;
5116 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5117 zsp->z_wp = (sector_t)-1;
5118 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5122 WARN_ON_ONCE((int)zsp->z_size <= 0);
5123 zstart += zsp->z_size;
/*
 * Allocate and initialize one emulated device, attach it to @sdbg_host and
 * add it to the host's device list.  LU name (UUID) behavior follows
 * sdebug_uuid_ctl: 1 = per-LU random, 2 = one shared UUID for all LUs.
 * NOTE(review): extraction gaps -- NULL checks, braces and the final return
 * are missing from this view.
 */
5129 static struct sdebug_dev_info *sdebug_device_create(
5130 struct sdebug_host_info *sdbg_host, gfp_t flags)
5132 struct sdebug_dev_info *devip;
5134 devip = kzalloc(sizeof(*devip), flags);
5136 if (sdebug_uuid_ctl == 1)
5137 uuid_gen(&devip->lu_name);
5138 else if (sdebug_uuid_ctl == 2) {
5139 if (got_shared_uuid)
5140 devip->lu_name = shared_uuid;
/* first LU generates the shared UUID; later LUs copy it */
5142 uuid_gen(&shared_uuid);
5143 got_shared_uuid = true;
5144 devip->lu_name = shared_uuid;
5147 devip->sdbg_host = sdbg_host;
5148 if (sdeb_zbc_in_use) {
5149 devip->zmodel = sdeb_zbc_model;
5150 if (sdebug_device_create_zones(devip)) {
5155 devip->zmodel = BLK_ZONED_NONE;
5157 devip->sdbg_host = sdbg_host;
5158 devip->create_ts = ktime_get_boottime();
/* stopped==2 means "becoming ready" when tur_ms_to_ready is configured */
5159 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5160 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find the sdebug_dev_info matching @sdev (channel/target/lun) on its host,
 * reusing a free slot or allocating a new one if none matches.
 * NOTE(review): extraction gaps -- braces, break/goto lines and the final
 * return are missing from this view.
 */
5165 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5167 struct sdebug_host_info *sdbg_host;
5168 struct sdebug_dev_info *open_devip = NULL;
5169 struct sdebug_dev_info *devip;
5171 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5173 pr_err("Host info NULL\n");
5177 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5178 if ((devip->used) && (devip->channel == sdev->channel) &&
5179 (devip->target == sdev->id) &&
5180 (devip->lun == sdev->lun))
/* remember the first unused slot for possible reuse */
5183 if ((!devip->used) && (!open_devip))
5187 if (!open_devip) { /* try and make a new one */
5188 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5190 pr_err("out of memory at line %d\n", __LINE__);
5195 open_devip->channel = sdev->channel;
5196 open_devip->target = sdev->id;
5197 open_devip->lun = sdev->lun;
5198 open_devip->sdbg_host = sdbg_host;
5199 atomic_set(&open_devip->num_in_q, 0);
/* report "power on occurred" unit attention for the fresh device */
5200 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5201 open_devip->used = true;
/*
 * Host template slave_alloc: only logs the device address when verbose.
 * NOTE(review): extraction gaps -- the verbose test and return are missing
 * from this view.
 */
5205 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5208 pr_info("slave_alloc <%u %u %u %llu>\n",
5209 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
/*
 * Host template slave_configure: bind (or build) the per-device state,
 * bump the host's max CDB length and apply the configured CDB length.
 * NOTE(review): extraction gaps -- braces and verbose test lines missing.
 */
5213 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5215 struct sdebug_dev_info *devip =
5216 (struct sdebug_dev_info *)sdp->hostdata;
5219 pr_info("slave_configure <%u %u %u %llu>\n",
5220 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5221 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5222 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5223 if (devip == NULL) {
5224 devip = find_build_dev_info(sdp);
5226 return 1; /* no resources, will be marked offline */
5228 sdp->hostdata = devip;
/* keep upper-level drivers from attaching when configured to do so */
5230 sdp->no_uld_attach = 1;
5231 config_cdb_len(sdp);
/*
 * Host template slave_destroy: release the per-device binding so the slot
 * can be reused by a later device at the same address.
 * NOTE(review): extraction gap -- a guard around the slot release
 * (presumably `if (devip)`) is missing from this view; confirm before edit.
 */
5235 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5237 struct sdebug_dev_info *devip =
5238 (struct sdebug_dev_info *)sdp->hostdata;
5241 pr_info("slave_destroy <%u %u %u %llu>\n",
5242 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5244 /* make this slot available for re-use */
5245 devip->used = false;
5246 sdp->hostdata = NULL;
5250 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5251 enum sdeb_defer_type defer_t)
5255 if (defer_t == SDEB_DEFER_HRT)
5256 hrtimer_cancel(&sd_dp->hrt);
5257 else if (defer_t == SDEB_DEFER_WQ)
5258 cancel_work_sync(&sd_dp->ew.work);
5261 /* If @cmnd found deletes its timer or work queue and returns true; else
/*
 * Search all submission queues for @cmnd; if found, release its slot,
 * cancel its deferred timer/work and return true, else false.
 * NOTE(review): extraction gaps -- braces, continue/return lines and the
 * final return are missing from this view.
 */
5263 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5265 unsigned long iflags;
5266 int j, k, qmax, r_qmax;
5267 enum sdeb_defer_type l_defer_t;
5268 struct sdebug_queue *sqp;
5269 struct sdebug_queued_cmd *sqcp;
5270 struct sdebug_dev_info *devip;
5271 struct sdebug_defer *sd_dp;
5273 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5274 spin_lock_irqsave(&sqp->qc_lock, iflags);
5275 qmax = sdebug_max_queue;
5276 r_qmax = atomic_read(&retired_max_queue);
5279 for (k = 0; k < qmax; ++k) {
5280 if (test_bit(k, sqp->in_use_bm)) {
5281 sqcp = &sqp->qc_arr[k];
5282 if (cmnd != sqcp->a_cmnd)
5285 devip = (struct sdebug_dev_info *)
5286 cmnd->device->hostdata;
5288 atomic_dec(&devip->num_in_q);
5289 sqcp->a_cmnd = NULL;
5290 sd_dp = sqcp->sd_dp;
/* snapshot and clear the defer type under the queue lock */
5292 l_defer_t = READ_ONCE(sd_dp->defer_t);
5293 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5295 l_defer_t = SDEB_DEFER_NONE;
/* drop the lock before cancelling: cancel may sleep (work sync) */
5296 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5297 stop_qc_helper(sd_dp, l_defer_t);
5298 clear_bit(k, sqp->in_use_bm);
5302 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5307 /* Deletes (stops) timers or work queues of all queued commands */
/*
 * Stop (cancel timers/work of) every queued command on every submission
 * queue.  Same per-slot logic as stop_queued_cmnd() but unconditional.
 * NOTE(review): extraction gaps -- braces and continue lines are missing
 * from this view.
 */
5308 static void stop_all_queued(void)
5310 unsigned long iflags;
5312 enum sdeb_defer_type l_defer_t;
5313 struct sdebug_queue *sqp;
5314 struct sdebug_queued_cmd *sqcp;
5315 struct sdebug_dev_info *devip;
5316 struct sdebug_defer *sd_dp;
5318 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5319 spin_lock_irqsave(&sqp->qc_lock, iflags);
5320 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5321 if (test_bit(k, sqp->in_use_bm)) {
5322 sqcp = &sqp->qc_arr[k];
5323 if (sqcp->a_cmnd == NULL)
5325 devip = (struct sdebug_dev_info *)
5326 sqcp->a_cmnd->device->hostdata;
5328 atomic_dec(&devip->num_in_q);
5329 sqcp->a_cmnd = NULL;
5330 sd_dp = sqcp->sd_dp;
5332 l_defer_t = READ_ONCE(sd_dp->defer_t);
5333 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5335 l_defer_t = SDEB_DEFER_NONE;
/* unlock around the (possibly sleeping) cancel, then re-acquire */
5336 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5337 stop_qc_helper(sd_dp, l_defer_t);
5338 clear_bit(k, sqp->in_use_bm);
5339 spin_lock_irqsave(&sqp->qc_lock, iflags);
5342 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5346 /* Free queued command memory on heap */
/*
 * Free the heap-allocated sdebug_defer of every queue slot.
 * NOTE(review): extraction gap -- the per-slot free statement(s) following
 * line 5355 are missing from this view.
 */
5347 static void free_all_queued(void)
5350 struct sdebug_queue *sqp;
5351 struct sdebug_queued_cmd *sqcp;
5353 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5354 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5355 sqcp = &sqp->qc_arr[k];
/*
 * Error-handler abort: try to stop the queued command and (noisily, when
 * enabled) report whether it was found.
 * NOTE(review): truncated -- the counter updates and the return statement
 * are missing from this view.
 */
5362 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5368 ok = stop_queued_cmnd(SCpnt);
5369 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5370 sdev_printk(KERN_INFO, SCpnt->device,
5371 "%s: command%s found\n", __func__,
/*
 * Error-handler device reset: post a power-on/reset unit attention on the
 * target device.  NOTE(review): extraction gaps -- counter update, a guard
 * around devip and the return are missing from this view.
 */
5377 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5380 if (SCpnt && SCpnt->device) {
5381 struct scsi_device *sdp = SCpnt->device;
5382 struct sdebug_dev_info *devip =
5383 (struct sdebug_dev_info *)sdp->hostdata;
5385 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5386 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
/* next command on this LU will see a POR unit attention */
5388 set_bit(SDEBUG_UA_POR, devip->uas_bm);
/*
 * Error-handler target reset: post a bus-reset unit attention on every LU
 * of the target and report the count when reset noise is enabled.
 * NOTE(review): extraction gaps -- guards, counter increments and the
 * return are missing from this view.
 */
5393 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5395 struct sdebug_host_info *sdbg_host;
5396 struct sdebug_dev_info *devip;
5397 struct scsi_device *sdp;
5398 struct Scsi_Host *hp;
5401 ++num_target_resets;
5404 sdp = SCpnt->device;
5407 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5408 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5412 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5414 list_for_each_entry(devip,
5415 &sdbg_host->dev_info_list,
/* only LUs of the target that owns SCpnt get the UA */
5417 if (devip->target == sdp->id) {
5418 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5422 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5423 sdev_printk(KERN_INFO, sdp,
5424 "%s: %d device(s) found in target\n", __func__, k);
/*
 * Error-handler bus reset: post a bus-reset unit attention on every LU of
 * the host that owns SCpnt.
 * NOTE(review): extraction gaps -- counter increments and return missing.
 */
5429 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5431 struct sdebug_host_info *sdbg_host;
5432 struct sdebug_dev_info *devip;
5433 struct scsi_device *sdp;
5434 struct Scsi_Host *hp;
5438 if (!(SCpnt && SCpnt->device))
5440 sdp = SCpnt->device;
5441 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5442 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5445 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5447 list_for_each_entry(devip,
5448 &sdbg_host->dev_info_list,
5450 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5455 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5456 sdev_printk(KERN_INFO, sdp,
5457 "%s: %d device(s) found in host\n", __func__, k);
/*
 * Error-handler host reset: post a bus-reset unit attention on every LU of
 * every emulated host, holding sdebug_host_list_lock over the walk.
 * NOTE(review): extraction gaps -- counter increments and return missing.
 */
5462 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5464 struct sdebug_host_info *sdbg_host;
5465 struct sdebug_dev_info *devip;
5469 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5470 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5471 spin_lock(&sdebug_host_list_lock);
5472 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5473 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5475 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5479 spin_unlock(&sdebug_host_list_lock);
5481 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5482 sdev_printk(KERN_INFO, SCpnt->device,
5483 "%s: %d device(s) found\n", __func__, k);
/*
 * Write a DOS/MBR partition table into the first sector of the ram store:
 * sdebug_num_parts equal-sized type-0x83 partitions with CHS values derived
 * from the emulated geometry.  No-op for tiny stores or zero partitions.
 * NOTE(review): extraction gaps -- early returns and a magic-byte line are
 * missing from this view.
 */
5487 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5489 struct msdos_partition *pp;
5490 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5491 int sectors_per_part, num_sectors, k;
5492 int heads_by_sects, start_sec, end_sec;
5494 /* assume partition table already zeroed */
5495 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5497 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5498 sdebug_num_parts = SDEBUG_MAX_PARTS;
5499 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5501 num_sectors = (int)get_sdebug_capacity();
5502 sectors_per_part = (num_sectors - sdebug_sectors_per)
5504 heads_by_sects = sdebug_heads * sdebug_sectors_per;
/* first partition starts after track 0 */
5505 starts[0] = sdebug_sectors_per;
5506 max_part_secs = sectors_per_part;
/* align each start to a cylinder boundary; track the smallest span */
5507 for (k = 1; k < sdebug_num_parts; ++k) {
5508 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5510 if (starts[k] - starts[k - 1] < max_part_secs)
5511 max_part_secs = starts[k] - starts[k - 1];
5513 starts[sdebug_num_parts] = num_sectors;
5514 starts[sdebug_num_parts + 1] = 0;
5516 ramp[510] = 0x55; /* magic partition markings */
5518 pp = (struct msdos_partition *)(ramp + 0x1be);
5519 for (k = 0; starts[k + 1]; ++k, ++pp) {
5520 start_sec = starts[k];
5521 end_sec = starts[k] + max_part_secs - 1;
/* CHS encoding of the partition start */
5524 pp->cyl = start_sec / heads_by_sects;
5525 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5526 / sdebug_sectors_per;
5527 pp->sector = (start_sec % sdebug_sectors_per) + 1;
/* CHS encoding of the partition end */
5529 pp->end_cyl = end_sec / heads_by_sects;
5530 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5531 / sdebug_sectors_per;
5532 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5534 pp->start_sect = cpu_to_le32(start_sec);
5535 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5536 pp->sys_ind = 0x83; /* plain Linux partition */
5540 static void block_unblock_all_queues(bool block)
5543 struct sdebug_queue *sqp;
5545 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5546 atomic_set(&sqp->blocked, (int)block);
5549 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5550 * commands will be processed normally before triggers occur.
5552 static void tweak_cmnd_count(void)
5556 modulo = abs(sdebug_every_nth);
5559 block_unblock_all_queues(true);
5560 count = atomic_read(&sdebug_cmnd_count);
5561 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5562 block_unblock_all_queues(false);
5565 static void clear_queue_stats(void)
5567 atomic_set(&sdebug_cmnd_count, 0);
5568 atomic_set(&sdebug_completions, 0);
5569 atomic_set(&sdebug_miss_cpus, 0);
5570 atomic_set(&sdebug_a_tsf, 0);
5573 static bool inject_on_this_cmd(void)
5575 if (sdebug_every_nth == 0)
5577 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5580 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5582 /* Complete the processing of the thread that queued a SCSI command to this
5583 * driver. It either completes the command by calling cmnd_done() or
5584 * schedules a hr timer or work queue then returns 0. Returns
5585 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
/*
 * Complete the processing of the thread that queued a SCSI command to this
 * driver: run the response function @pfp, then either complete inline
 * (respond_in_thread) or defer completion via hrtimer, work queue or
 * polling, according to @delta_jiff/@ndelay and the request's REQ_POLLED
 * flag.  Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 * NOTE(review): extraction gaps -- many braces, else branches, goto labels
 * and assignments are missing from this view; do not restructure without
 * the full original.
 */
5587 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5589 int (*pfp)(struct scsi_cmnd *,
5590 struct sdebug_dev_info *),
5591 int delta_jiff, int ndelay)
5594 bool inject = false;
5595 bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5596 int k, num_in_q, qdepth;
5597 unsigned long iflags;
5598 u64 ns_from_boot = 0;
5599 struct sdebug_queue *sqp;
5600 struct sdebug_queued_cmd *sqcp;
5601 struct scsi_device *sdp;
5602 struct sdebug_defer *sd_dp;
5604 if (unlikely(devip == NULL)) {
5605 if (scsi_result == 0)
5606 scsi_result = DID_NO_CONNECT << 16;
5607 goto respond_in_thread;
/* zero delay means synchronous completion in the caller's context */
5611 if (delta_jiff == 0)
5612 goto respond_in_thread;
5614 sqp = get_queue(cmnd);
5615 spin_lock_irqsave(&sqp->qc_lock, iflags);
5616 if (unlikely(atomic_read(&sqp->blocked))) {
5617 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5618 return SCSI_MLQUEUE_HOST_BUSY;
/* queue-full and rare task-set-full injection checks */
5620 num_in_q = atomic_read(&devip->num_in_q);
5621 qdepth = cmnd->device->queue_depth;
5622 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5624 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5625 goto respond_in_thread;
5627 scsi_result = device_qfull_result;
5628 } else if (unlikely(sdebug_every_nth &&
5629 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5630 (scsi_result == 0))) {
5631 if ((num_in_q == (qdepth - 1)) &&
5632 (atomic_inc_return(&sdebug_a_tsf) >=
5633 abs(sdebug_every_nth))) {
5634 atomic_set(&sdebug_a_tsf, 0);
5636 scsi_result = device_qfull_result;
/* claim a free slot in the per-queue command array */
5640 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5641 if (unlikely(k >= sdebug_max_queue)) {
5642 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5644 goto respond_in_thread;
5645 scsi_result = device_qfull_result;
5646 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5647 sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5648 __func__, sdebug_max_queue);
5649 goto respond_in_thread;
5651 set_bit(k, sqp->in_use_bm);
5652 atomic_inc(&devip->num_in_q);
5653 sqcp = &sqp->qc_arr[k];
5654 sqcp->a_cmnd = cmnd;
5655 cmnd->host_scribble = (unsigned char *)sqcp;
5656 sd_dp = sqcp->sd_dp;
5657 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
/* lazily allocate the deferral record the first time a slot is used */
5660 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5662 atomic_dec(&devip->num_in_q);
5663 clear_bit(k, sqp->in_use_bm);
5664 return SCSI_MLQUEUE_HOST_BUSY;
5671 /* Set the hostwide tag */
5672 if (sdebug_host_max_queue)
5673 sd_dp->hc_idx = get_tag(cmnd);
5676 ns_from_boot = ktime_get_boottime_ns();
5678 /* one of the resp_*() response functions is called here */
5679 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5680 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5681 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5682 delta_jiff = ndelay = 0;
5684 if (cmnd->result == 0 && scsi_result != 0)
5685 cmnd->result = scsi_result;
5686 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5687 if (atomic_read(&sdeb_inject_pending)) {
5688 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5689 atomic_set(&sdeb_inject_pending, 0);
5690 cmnd->result = check_condition_result;
5694 if (unlikely(sdebug_verbose && cmnd->result))
5695 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5696 __func__, cmnd->result);
/* deferred completion: compute the delay, then arm timer or work item */
5698 if (delta_jiff > 0 || ndelay > 0) {
5701 if (delta_jiff > 0) {
5702 u64 ns = jiffies_to_nsecs(delta_jiff);
/* optional randomization of the completion delay */
5704 if (sdebug_random && ns < U32_MAX) {
5705 ns = prandom_u32_max((u32)ns);
5706 } else if (sdebug_random) {
5707 ns >>= 12; /* scale to 4 usec precision */
5708 if (ns < U32_MAX) /* over 4 hours max */
5709 ns = prandom_u32_max((u32)ns);
5712 kt = ns_to_ktime(ns);
5713 } else { /* ndelay has a 4.2 second max */
5714 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5716 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5717 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5719 if (kt <= d) { /* elapsed duration >= kt */
/* the deadline already passed: release the slot, finish inline */
5720 spin_lock_irqsave(&sqp->qc_lock, iflags);
5721 sqcp->a_cmnd = NULL;
5722 atomic_dec(&devip->num_in_q);
5723 clear_bit(k, sqp->in_use_bm);
5724 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5727 /* call scsi_done() from this thread */
5731 /* otherwise reduce kt by elapsed time */
5736 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5737 spin_lock_irqsave(&sqp->qc_lock, iflags);
/* polled requests are completed by sdebug_blk_mq_poll, not a timer */
5738 if (!sd_dp->init_poll) {
5739 sd_dp->init_poll = true;
5740 sqcp->sd_dp = sd_dp;
5741 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5744 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5745 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5747 if (!sd_dp->init_hrt) {
5748 sd_dp->init_hrt = true;
5749 sqcp->sd_dp = sd_dp;
5750 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5751 HRTIMER_MODE_REL_PINNED);
5752 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5753 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5756 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5757 /* schedule the invocation of scsi_done() for a later time */
5758 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5760 if (sdebug_statistics)
5761 sd_dp->issuing_cpu = raw_smp_processor_id();
5762 } else { /* jdelay < 0, use work queue */
5763 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5764 atomic_read(&sdeb_inject_pending)))
5765 sd_dp->aborted = true;
5767 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5768 spin_lock_irqsave(&sqp->qc_lock, iflags);
5769 if (!sd_dp->init_poll) {
5770 sd_dp->init_poll = true;
5771 sqcp->sd_dp = sd_dp;
5772 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5775 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5776 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5778 if (!sd_dp->init_wq) {
5779 sd_dp->init_wq = true;
5780 sqcp->sd_dp = sd_dp;
5781 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5783 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5785 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5786 schedule_work(&sd_dp->ew.work);
5788 if (sdebug_statistics)
5789 sd_dp->issuing_cpu = raw_smp_processor_id();
/* injected command abort: let the block layer time the request out */
5790 if (unlikely(sd_dp->aborted)) {
5791 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5792 scsi_cmd_to_rq(cmnd)->tag);
5793 blk_abort_request(scsi_cmd_to_rq(cmnd));
5794 atomic_set(&sdeb_inject_pending, 0);
5795 sd_dp->aborted = false;
5798 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5799 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5800 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL")
5803 respond_in_thread: /* call back to mid-layer using invocation thread */
5804 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5805 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5806 if (cmnd->result == 0 && scsi_result != 0)
5807 cmnd->result = scsi_result;
5812 /* Note: The following macros create attribute files in the
5813 /sys/module/scsi_debug/parameters directory. Unfortunately this
5814 driver is unaware of a change and cannot trigger auxiliary actions
5815 as it can when the corresponding attribute in the
5816 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/*
 * Module parameters: each line creates a file under
 * /sys/module/scsi_debug/parameters (see the comment block above).
 * Writes through these files update the variable only -- per the note
 * above, no auxiliary action is triggered (unlike the driver attributes
 * further down in this file).
 * NOTE(review): this listing is elided -- several multi-line invocations
 * (e.g. medium_error_count, per_host_store, vpd_use_hostno,
 * write_same_length) are missing their permission-argument continuation
 * lines; confirm against the full source before editing.
 */
5818 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5819 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5820 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5821 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5822 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5823 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5824 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5825 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5826 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5827 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5828 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5829 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5830 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5831 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5832 module_param_string(inq_product, sdebug_inq_product_id,
5833 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5834 module_param_string(inq_rev, sdebug_inq_product_rev,
5835 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5836 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5837 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5838 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5839 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5840 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5841 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5842 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5843 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5844 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5845 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5846 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5848 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5850 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5851 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5852 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5853 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5854 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5855 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5856 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5857 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5858 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5859 module_param_named(per_host_store, sdebug_per_host_store, bool,
5861 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5862 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5863 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5864 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5865 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5866 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5867 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5868 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5869 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5870 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5871 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5872 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5873 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5874 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5875 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5876 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5877 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5878 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5880 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5881 module_param_named(write_same_length, sdebug_write_same_length, int,
5883 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5884 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5885 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5886 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5887 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
/* Module identity; SDEBUG_VERSION must track the INQUIRY revision field
 * (see the note near the top of the file). */
5889 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5890 MODULE_DESCRIPTION("SCSI debug adapter driver");
5891 MODULE_LICENSE("GPL");
5892 MODULE_VERSION(SDEBUG_VERSION);
/* One-line usage string (shown by modinfo) for each parameter above. */
5894 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5895 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5896 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5897 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5898 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5899 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5900 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5901 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5902 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5903 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5904 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5905 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5906 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5907 MODULE_PARM_DESC(host_max_queue,
5908 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5909 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5910 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5911 SDEBUG_VERSION "\")");
5912 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5913 MODULE_PARM_DESC(lbprz,
5914 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5915 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5916 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5917 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5918 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5919 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5920 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5921 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5922 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5923 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5924 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5925 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5926 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5927 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5928 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5929 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5930 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5931 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5932 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5933 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5934 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5935 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5936 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5937 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5938 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5939 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5940 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5941 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5942 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5943 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5944 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5945 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5946 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5947 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5948 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5949 MODULE_PARM_DESC(uuid_ctl,
5950 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5951 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5952 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5953 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5954 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5955 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5956 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5957 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5958 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5959 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
/* Size of the shared buffer used by the .info host-template handler. */
5961 #define SDEBUG_INFO_LEN 256
5962 static char sdebug_info[SDEBUG_INFO_LEN];
/*
 * scsi_host_template .info handler: formats a version line followed by a
 * summary of key settings into the single static sdebug_info[] buffer
 * (shared across hosts, so not reentrant).  The second scnprintf() is
 * skipped when the first line alone fills the buffer.
 * NOTE(review): local declarations, braces and return statements are
 * elided in this listing -- do not edit without the full source.
 */
5964 static const char *scsi_debug_info(struct Scsi_Host *shp)
5968 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5969 my_name, SDEBUG_VERSION, sdebug_version_date);
5970 if (k >= (SDEBUG_INFO_LEN - 1))
5972 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5973 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5974 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5975 "statistics", (int)sdebug_statistics);
5979 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
/*
 * Requires both CAP_SYS_ADMIN and CAP_SYS_RAWIO.  Copies at most 15 bytes
 * of the user buffer, parses one decimal int and uses its bits to refresh
 * the derived flags sdebug_verbose / sdebug_any_injecting_opt.
 * NOTE(review): the buffer declaration for arr[], early returns and the
 * closing brace are elided in this listing.
 */
5980 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5985 int minLen = length > 15 ? 15 : length;
5987 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5989 memcpy(arr, buffer, minLen);
5991 if (1 != sscanf(arr, "%d", &opts))
5994 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5995 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5996 if (sdebug_every_nth != 0)
6001 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6002 * same for each scsi_debug host (if more than one). Some of the counters
6003 * output are not atomics so might be inaccurate in a busy system. */
/*
 * seq_file dump: driver version/config, counters, per-submit-queue busy
 * bitmap ranges, then (when the per-store xarray is non-empty) the host
 * list and per-store index table.
 * NOTE(review): loop-variable declarations and several closing braces are
 * elided in this listing.
 */
6004 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6007 struct sdebug_queue *sqp;
6008 struct sdebug_host_info *sdhp;
6010 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6011 SDEBUG_VERSION, sdebug_version_date);
6012 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6013 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6014 sdebug_opts, sdebug_every_nth);
6015 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6016 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6017 sdebug_sector_size, "bytes");
6018 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6019 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6021 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6022 num_dev_resets, num_target_resets, num_bus_resets,
6024 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6025 dix_reads, dix_writes, dif_errors);
6026 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6028 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6029 atomic_read(&sdebug_cmnd_count),
6030 atomic_read(&sdebug_completions),
6031 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6032 atomic_read(&sdebug_a_tsf),
6033 atomic_read(&sdeb_mq_poll_count));
6035 seq_printf(m, "submit_queues=%d\n", submit_queues);
/* Report first/last in-use tag bits per queue, only when some are busy. */
6036 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6037 seq_printf(m, " queue %d:\n", j);
6038 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6039 if (f != sdebug_max_queue) {
6040 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6041 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6042 "first,last bits", f, l);
6046 seq_printf(m, "this host_no=%d\n", host->host_no);
6047 if (!xa_empty(per_store_ap)) {
6050 unsigned long l_idx;
6051 struct sdeb_store_info *sip;
6053 seq_puts(m, "\nhost list:\n");
6055 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6057 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6058 sdhp->shost->host_no, idx);
6061 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6062 sdeb_most_recent_idx);
6064 xa_for_each(per_store_ap, l_idx, sip) {
6065 niu = xa_get_mark(per_store_ap, l_idx,
6066 SDEB_XA_NOT_IN_USE);
6068 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6069 (niu ? " not_in_use" : ""));
/* sysfs 'delay' attribute: response delay in jiffies (sdebug_jdelay). */
6076 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6078 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6080 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6081 * of delay is jiffies.
/* Store: refuses the change (-EBUSY) while any submit queue has in-use
 * tags; queues are blocked around the scan/update.
 * NOTE(review): declarations, inner-loop lines and braces are elided. */
6083 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6088 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6090 if (sdebug_jdelay != jdelay) {
6092 struct sdebug_queue *sqp;
6094 block_unblock_all_queues(true);
6095 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6097 k = find_first_bit(sqp->in_use_bm,
6099 if (k != sdebug_max_queue) {
6100 res = -EBUSY; /* queued commands */
6105 sdebug_jdelay = jdelay;
6108 block_unblock_all_queues(false);
6114 static DRIVER_ATTR_RW(delay);
/* sysfs 'ndelay' attribute: response delay in nanoseconds. */
6116 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6118 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6120 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6121 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
/* Accepts 0 <= ndelay < 1e9 (i.e. under one second); same busy-queue
 * guard as delay_store above. */
6122 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6127 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6128 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6130 if (sdebug_ndelay != ndelay) {
6132 struct sdebug_queue *sqp;
6134 block_unblock_all_queues(true);
6135 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6137 k = find_first_bit(sqp->in_use_bm,
6139 if (k != sdebug_max_queue) {
6140 res = -EBUSY; /* queued commands */
6145 sdebug_ndelay = ndelay;
6146 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6149 block_unblock_all_queues(false);
6155 static DRIVER_ATTR_RW(ndelay);
/* sysfs 'opts' attribute: option flag word, shown in hex. */
6157 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6159 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
/* Store: accepts "0x..." hex or decimal, then refreshes the derived
 * verbose/injection flags from the new bits. */
6162 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6168 if (sscanf(buf, "%10s", work) == 1) {
6169 if (strncasecmp(work, "0x", 2) == 0) {
6170 if (kstrtoint(work + 2, 16, &opts) == 0)
6173 if (kstrtoint(work, 10, &opts) == 0)
6180 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6181 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6185 static DRIVER_ATTR_RW(opts);
/* sysfs 'ptype' attribute: SCSI peripheral device type. */
6187 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6189 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6191 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6196 /* Cannot change from or to TYPE_ZBC with sysfs */
6197 if (sdebug_ptype == TYPE_ZBC)
6200 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6208 static DRIVER_ATTR_RW(ptype);
/* sysfs 'dsense' attribute: descriptor (vs fixed) sense format toggle. */
6210 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6212 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6214 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6219 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6225 static DRIVER_ATTR_RW(dsense);
/* sysfs 'fake_rw' attribute.  Writing toggles whether a backing ram
 * store exists: 1->0 creates/reuses a store and points every host at it;
 * 0->1 erases all stores apart from the first. */
6227 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6229 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6231 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6236 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6237 bool want_store = (n == 0);
6238 struct sdebug_host_info *sdhp;
/* normalize current value to 0/1 before comparing */
6241 sdebug_fake_rw = (sdebug_fake_rw > 0);
6242 if (sdebug_fake_rw == n)
6243 return count; /* not transitioning so do nothing */
6245 if (want_store) { /* 1 --> 0 transition, set up store */
6246 if (sdeb_first_idx < 0) {
6247 idx = sdebug_add_store();
6251 idx = sdeb_first_idx;
6252 xa_clear_mark(per_store_ap, idx,
6253 SDEB_XA_NOT_IN_USE);
6255 /* make all hosts use same store */
6256 list_for_each_entry(sdhp, &sdebug_host_list,
6258 if (sdhp->si_idx != idx) {
6259 xa_set_mark(per_store_ap, sdhp->si_idx,
6260 SDEB_XA_NOT_IN_USE);
6264 sdeb_most_recent_idx = idx;
6265 } else { /* 0 --> 1 transition is trigger for shrink */
6266 sdebug_erase_all_stores(true /* apart from first */);
6273 static DRIVER_ATTR_RW(fake_rw);
/* sysfs 'no_lun_0' attribute: suppress LUN 0 when non-zero. */
6275 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6277 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6279 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6284 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6285 sdebug_no_lun_0 = n;
6290 static DRIVER_ATTR_RW(no_lun_0);
/* sysfs 'num_tgts' attribute: targets per host; store triggers a
 * target/LUN rescan via sdebug_max_tgts_luns(). */
6292 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6294 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6296 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6301 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6302 sdebug_num_tgts = n;
6303 sdebug_max_tgts_luns();
6308 static DRIVER_ATTR_RW(num_tgts);
/* sysfs 'dev_size_mb' attribute: read-only ram store size. */
6310 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6312 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6314 static DRIVER_ATTR_RO(dev_size_mb);
/* sysfs 'per_host_store' attribute: bool, parsed with kstrtobool. */
6316 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6318 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6321 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6326 if (kstrtobool(buf, &v))
6329 sdebug_per_host_store = v;
6332 static DRIVER_ATTR_RW(per_host_store);
/* sysfs 'num_parts' attribute: read-only partition count. */
6334 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6336 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6338 static DRIVER_ATTR_RO(num_parts);
/* sysfs 'every_nth' attribute: accepts hex ("0x..") or decimal; a
 * non-zero value force-enables statistics collection. */
6340 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6342 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6344 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6350 if (sscanf(buf, "%10s", work) == 1) {
6351 if (strncasecmp(work, "0x", 2) == 0) {
6352 if (kstrtoint(work + 2, 16, &nth) == 0)
6353 goto every_nth_done;
6355 if (kstrtoint(work, 10, &nth) == 0)
6356 goto every_nth_done;
6362 sdebug_every_nth = nth;
6363 if (nth && !sdebug_statistics) {
6364 pr_info("every_nth needs statistics=1, set it\n");
6365 sdebug_statistics = true;
6370 static DRIVER_ATTR_RW(every_nth);
/* sysfs 'lun_format' attribute: LUN address method (0 or 1).  On change
 * with scsi_level >= SPC-3, raise a LUNS_CHANGED unit attention on every
 * device of every host. */
6372 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6374 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6376 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6382 if (kstrtoint(buf, 0, &n))
6385 if (n > (int)SAM_LUN_AM_FLAT) {
6386 pr_warn("only LUN address methods 0 and 1 are supported\n");
6389 changed = ((int)sdebug_lun_am != n);
6391 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6392 struct sdebug_host_info *sdhp;
6393 struct sdebug_dev_info *dp;
6395 spin_lock(&sdebug_host_list_lock);
6396 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6397 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6398 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6401 spin_unlock(&sdebug_host_list_lock);
6407 static DRIVER_ATTR_RW(lun_format);
/* sysfs 'max_luns' attribute: capped at 256; change propagates a
 * LUNS_CHANGED unit attention like lun_format above. */
6409 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6411 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6413 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6419 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6421 pr_warn("max_luns can be no more than 256\n");
6424 changed = (sdebug_max_luns != n);
6425 sdebug_max_luns = n;
6426 sdebug_max_tgts_luns();
6427 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6428 struct sdebug_host_info *sdhp;
6429 struct sdebug_dev_info *dp;
6431 spin_lock(&sdebug_host_list_lock);
6432 list_for_each_entry(sdhp, &sdebug_host_list,
6434 list_for_each_entry(dp, &sdhp->dev_info_list,
6436 set_bit(SDEBUG_UA_LUNS_CHANGED,
6440 spin_unlock(&sdebug_host_list_lock);
6446 static DRIVER_ATTR_RW(max_luns);
/* sysfs 'max_queue' attribute. */
6448 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6450 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6452 /* N.B. max_queue can be changed while there are queued commands. In flight
6453 * commands beyond the new max_queue will be completed. */
/* Store: only when host_max_queue is 0 and 0 < n <= SDEBUG_CANQUEUE.
 * Scans all queues for the highest in-use tag and records it in
 * retired_max_queue so commands above the new limit can drain. */
6454 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6458 struct sdebug_queue *sqp;
6460 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6461 (n <= SDEBUG_CANQUEUE) &&
6462 (sdebug_host_max_queue == 0)) {
6463 block_unblock_all_queues(true);
6465 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6467 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6471 sdebug_max_queue = n;
6472 if (k == SDEBUG_CANQUEUE)
6473 atomic_set(&retired_max_queue, 0);
6475 atomic_set(&retired_max_queue, k + 1);
6477 atomic_set(&retired_max_queue, 0);
6478 block_unblock_all_queues(false);
6483 static DRIVER_ATTR_RW(max_queue);
6485 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6487 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
/* sysfs 'no_rwlock' attribute: bool, skips user-data read/write locking. */
6490 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6492 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6495 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6499 if (kstrtobool(buf, &v))
6502 sdebug_no_rwlock = v;
6505 static DRIVER_ATTR_RW(no_rwlock);
6508 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6509 * in range [0, sdebug_host_max_queue), we can't change it.
6511 static DRIVER_ATTR_RO(host_max_queue);
/* Read-only attributes: fixed after module load. */
6513 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6515 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6517 static DRIVER_ATTR_RO(no_uld);
6519 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6521 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6523 static DRIVER_ATTR_RO(scsi_level);
/* sysfs 'virtual_gb' attribute: resizes reported capacity; change raises
 * a CAPACITY_CHANGED unit attention on every device of every host. */
6525 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6527 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6529 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6535 /* Ignore capacity change for ZBC drives for now */
6536 if (sdeb_zbc_in_use)
6539 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6540 changed = (sdebug_virtual_gb != n);
6541 sdebug_virtual_gb = n;
6542 sdebug_capacity = get_sdebug_capacity();
6544 struct sdebug_host_info *sdhp;
6545 struct sdebug_dev_info *dp;
6547 spin_lock(&sdebug_host_list_lock);
6548 list_for_each_entry(sdhp, &sdebug_host_list,
6550 list_for_each_entry(dp, &sdhp->dev_info_list,
6552 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6556 spin_unlock(&sdebug_host_list_lock);
6562 static DRIVER_ATTR_RW(virtual_gb);
/* sysfs 'add_host' attribute: show reports current host count; store
 * takes a signed delta -- positive adds hosts (reusing a marked
 * not-in-use store when one exists), negative removes hosts. */
6564 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6566 /* absolute number of hosts currently active is what is shown */
6567 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6570 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6575 struct sdeb_store_info *sip;
6576 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6579 if (sscanf(buf, "%d", &delta_hosts) != 1)
6581 if (delta_hosts > 0) {
/* look for a parked (not-in-use) store to re-use */
6585 xa_for_each_marked(per_store_ap, idx, sip,
6586 SDEB_XA_NOT_IN_USE) {
6587 sdeb_most_recent_idx = (int)idx;
6591 if (found) /* re-use case */
6592 sdebug_add_host_helper((int)idx);
6594 sdebug_do_add_host(true);
6596 sdebug_do_add_host(false);
6598 } while (--delta_hosts);
6599 } else if (delta_hosts < 0) {
6601 sdebug_do_remove_host(false);
6602 } while (++delta_hosts);
6606 static DRIVER_ATTR_RW(add_host);
/* sysfs 'vpd_use_hostno' attribute. */
6608 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6610 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6612 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6617 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6618 sdebug_vpd_use_hostno = n;
6623 static DRIVER_ATTR_RW(vpd_use_hostno);
/* sysfs 'statistics' attribute: disabling also clears queue counters. */
6625 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6627 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6629 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6634 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6636 sdebug_statistics = true;
6638 clear_queue_stats();
6639 sdebug_statistics = false;
6645 static DRIVER_ATTR_RW(statistics);
/* Read-only attributes: values fixed by module parameters at load time. */
6647 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6649 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6651 static DRIVER_ATTR_RO(sector_size);
6653 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6655 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6657 static DRIVER_ATTR_RO(submit_queues);
6659 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6661 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6663 static DRIVER_ATTR_RO(dix);
6665 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6667 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6669 static DRIVER_ATTR_RO(dif);
6671 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6673 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6675 static DRIVER_ATTR_RO(guard);
6677 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6679 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6681 static DRIVER_ATTR_RO(ato);
/* sysfs 'map' attribute: when LBP is off, report the whole sector range;
 * otherwise print the provisioning bitmap of store 0 as a bit list. */
6683 static ssize_t map_show(struct device_driver *ddp, char *buf)
6687 if (!scsi_debug_lbp())
6688 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6689 sdebug_store_sectors);
6691 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6692 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6695 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6696 (int)map_size, sip->map_storep);
6698 buf[count++] = '\n';
6703 static DRIVER_ATTR_RO(map);
/* sysfs 'random' attribute: randomized command duration toggle. */
6705 static ssize_t random_show(struct device_driver *ddp, char *buf)
6707 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6710 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6715 if (kstrtobool(buf, &v))
6721 static DRIVER_ATTR_RW(random);
/* sysfs 'removable' attribute: normalized to 0/1 on both paths. */
6723 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6725 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6727 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6732 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6733 sdebug_removable = (n > 0);
6738 static DRIVER_ATTR_RW(removable);
6740 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6742 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6744 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6745 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6750 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6751 sdebug_host_lock = (n > 0);
6756 static DRIVER_ATTR_RW(host_lock);
/* sysfs 'strict' attribute: stricter CDB checking. */
6758 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6760 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6762 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6767 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6768 sdebug_strict = (n > 0);
6773 static DRIVER_ATTR_RW(strict);
6775 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6777 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6779 static DRIVER_ATTR_RO(uuid_ctl);
/* sysfs 'cdb_len' attribute: store re-applies the suggested CDB length
 * to all configured devices via all_config_cdb_len(). */
6781 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6783 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6785 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6790 ret = kstrtoint(buf, 0, &n);
6794 all_config_cdb_len();
6797 static DRIVER_ATTR_RW(cdb_len);
/* Accepted spellings for the 'zbc' module parameter, indexed by the
 * block-layer zoned-model constants; three synonym tables are tried in
 * turn by sdeb_zbc_model_str() below. */
6799 static const char * const zbc_model_strs_a[] = {
6800 [BLK_ZONED_NONE] = "none",
6801 [BLK_ZONED_HA] = "host-aware",
6802 [BLK_ZONED_HM] = "host-managed",
6805 static const char * const zbc_model_strs_b[] = {
6806 [BLK_ZONED_NONE] = "no",
6807 [BLK_ZONED_HA] = "aware",
6808 [BLK_ZONED_HM] = "managed",
6811 static const char * const zbc_model_strs_c[] = {
6812 [BLK_ZONED_NONE] = "0",
6813 [BLK_ZONED_HA] = "1",
6814 [BLK_ZONED_HM] = "2",
/* Map a user-supplied zbc model string to a BLK_ZONED_* value, trying
 * each synonym table in order.
 * NOTE(review): fallthrough conditions/returns are elided in this
 * listing. */
6817 static int sdeb_zbc_model_str(const char *cp)
6819 int res = sysfs_match_string(zbc_model_strs_a, cp);
6822 res = sysfs_match_string(zbc_model_strs_b, cp);
6824 res = sysfs_match_string(zbc_model_strs_c, cp);
/* sysfs 'zbc' attribute: read-only, shows the canonical model name. */
6832 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6834 return scnprintf(buf, PAGE_SIZE, "%s\n",
6835 zbc_model_strs_a[sdeb_zbc_model]);
6837 static DRIVER_ATTR_RO(zbc);
/* sysfs 'tur_ms_to_ready' attribute: read-only. */
6839 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6841 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6843 static DRIVER_ATTR_RO(tur_ms_to_ready);
6845 /* Note: The following array creates attribute files in the
6846 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6847 files (over those found in the /sys/module/scsi_debug/parameters
6848 directory) is that auxiliary actions can be triggered when an attribute
6849 is changed. For example see: add_host_store() above.
/*
 * Table of driver sysfs attributes exported under
 * /sys/bus/pseudo/drivers/scsi_debug/ (see the comment above).  The
 * terminating NULL entry and closing brace are not visible in this view.
 */
6852 static struct attribute *sdebug_drv_attrs[] = {
6853 	&driver_attr_delay.attr,
6854 	&driver_attr_opts.attr,
6855 	&driver_attr_ptype.attr,
6856 	&driver_attr_dsense.attr,
6857 	&driver_attr_fake_rw.attr,
6858 	&driver_attr_host_max_queue.attr,
6859 	&driver_attr_no_lun_0.attr,
6860 	&driver_attr_num_tgts.attr,
6861 	&driver_attr_dev_size_mb.attr,
6862 	&driver_attr_num_parts.attr,
6863 	&driver_attr_every_nth.attr,
6864 	&driver_attr_lun_format.attr,
6865 	&driver_attr_max_luns.attr,
6866 	&driver_attr_max_queue.attr,
6867 	&driver_attr_no_rwlock.attr,
6868 	&driver_attr_no_uld.attr,
6869 	&driver_attr_scsi_level.attr,
6870 	&driver_attr_virtual_gb.attr,
6871 	&driver_attr_add_host.attr,
6872 	&driver_attr_per_host_store.attr,
6873 	&driver_attr_vpd_use_hostno.attr,
6874 	&driver_attr_sector_size.attr,
6875 	&driver_attr_statistics.attr,
6876 	&driver_attr_submit_queues.attr,
6877 	&driver_attr_dix.attr,
6878 	&driver_attr_dif.attr,
6879 	&driver_attr_guard.attr,
6880 	&driver_attr_ato.attr,
6881 	&driver_attr_map.attr,
6882 	&driver_attr_random.attr,
6883 	&driver_attr_removable.attr,
6884 	&driver_attr_host_lock.attr,
6885 	&driver_attr_ndelay.attr,
6886 	&driver_attr_strict.attr,
6887 	&driver_attr_uuid_ctl.attr,
6888 	&driver_attr_cdb_len.attr,
6889 	&driver_attr_tur_ms_to_ready.attr,
6890 	&driver_attr_zbc.attr,
/* generates sdebug_drv_groups for the bus_type's drv_groups field */
6893 ATTRIBUTE_GROUPS(sdebug_drv);
/* root "pseudo_0" device, parent of all simulated adapters */
6895 static struct device *pseudo_primary;
/*
 * Module init: validate all module parameters, size the ramdisk store(s),
 * create per-submit-queue state, register the pseudo bus/driver, and add
 * the initially requested host(s).  Returns 0 on success or a negated
 * errno; the tail labels unwind in reverse registration order.
 * NOTE(review): many lines of this function are not visible in this view.
 */
6897 static int __init scsi_debug_init(void)
6899 	bool want_store = (sdebug_fake_rw == 0);
6901 	int k, ret, hosts_to_add;
6904 	ramdisk_lck_a[0] = &atomic_rw;
6905 	ramdisk_lck_a[1] = &atomic_rw2;
6906 	atomic_set(&retired_max_queue, 0);
	/* ndelay must stay below one second; otherwise fall back to jdelay */
6908 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6909 		pr_warn("ndelay must be less than 1 second, ignored\n");
6911 	} else if (sdebug_ndelay > 0)
6912 		sdebug_jdelay = JDELAY_OVERRIDDEN;
	/* only a fixed set of sector sizes is supported */
6914 	switch (sdebug_sector_size) {
6921 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
	/* T10 protection information (DIF) type must be 0..3 */
6925 	switch (sdebug_dif) {
6926 	case T10_PI_TYPE0_PROTECTION:
6928 	case T10_PI_TYPE1_PROTECTION:
6929 	case T10_PI_TYPE2_PROTECTION:
6930 	case T10_PI_TYPE3_PROTECTION:
6931 		have_dif_prot = true;
6935 		pr_err("dif must be 0, 1, 2 or 3\n");
6939 	if (sdebug_num_tgts < 0) {
6940 		pr_err("num_tgts must be >= 0\n");
6944 	if (sdebug_guard > 1) {
6945 		pr_err("guard must be 0 or 1\n");
6949 	if (sdebug_ato > 1) {
6950 		pr_err("ato must be 0 or 1\n");
6954 	if (sdebug_physblk_exp > 15) {
6955 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
	/* LUN addressing method: fall back to peripheral format if invalid */
6959 	sdebug_lun_am = sdebug_lun_am_i;
6960 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6961 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6962 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	/* > 256 LUNs requires flat addressing; 16384 is the hard cap */
6965 	if (sdebug_max_luns > 256) {
6966 		if (sdebug_max_luns > 16384) {
6967 			pr_warn("max_luns can be no more than 16384, use default\n");
6968 			sdebug_max_luns = DEF_MAX_LUNS;
6970 			sdebug_lun_am = SAM_LUN_AM_FLAT;
6973 	if (sdebug_lowest_aligned > 0x3fff) {
6974 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6978 	if (submit_queues < 1) {
6979 		pr_err("submit_queues must be 1 or more\n");
6983 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6984 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6988 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6989 	    (sdebug_host_max_queue < 0)) {
6990 		pr_err("host_max_queue must be in range [0 %d]\n",
	/* host_max_queue, when set, overrides max_queue */
6995 	if (sdebug_host_max_queue &&
6996 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6997 		sdebug_max_queue = sdebug_host_max_queue;
6998 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
	/* one sdebug_queue (with its own lock) per submit queue */
7002 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
7004 	if (sdebug_q_arr == NULL)
7006 	for (k = 0; k < submit_queues; ++k)
7007 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
7010 	 * check for host managed zoned block device specified with
7011 	 * ptype=0x14 or zbc=XXX.
7013 	if (sdebug_ptype == TYPE_ZBC) {
7014 		sdeb_zbc_model = BLK_ZONED_HM;
7015 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7016 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7022 		switch (sdeb_zbc_model) {
7023 		case BLK_ZONED_NONE:
7025 			sdebug_ptype = TYPE_DISK;
7028 			sdebug_ptype = TYPE_ZBC;
7031 			pr_err("Invalid ZBC model\n");
	/* zoned devices get a larger default store size */
7036 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7037 		sdeb_zbc_in_use = true;
7038 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7039 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7042 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7043 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7044 	if (sdebug_dev_size_mb < 1)
7045 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7046 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7047 	sdebug_store_sectors = sz / sdebug_sector_size;
7048 	sdebug_capacity = get_sdebug_capacity();
7050 	/* play around with geometry, don't waste too much on track 0 */
7052 	sdebug_sectors_per = 32;
7053 	if (sdebug_dev_size_mb >= 256)
7055 	else if (sdebug_dev_size_mb >= 16)
7057 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7058 			       (sdebug_sectors_per * sdebug_heads);
7059 	if (sdebug_cylinders_per >= 1024) {
7060 		/* other LLDs do this; implies >= 1GB ram disk ... */
7062 		sdebug_sectors_per = 63;
7063 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7064 				       (sdebug_sectors_per * sdebug_heads);
	/* Logical Block Provisioning (thin provisioning) parameter clamping */
7066 	if (scsi_debug_lbp()) {
7067 		sdebug_unmap_max_blocks =
7068 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7070 		sdebug_unmap_max_desc =
7071 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7073 		sdebug_unmap_granularity =
7074 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7076 		if (sdebug_unmap_alignment &&
7077 		    sdebug_unmap_granularity <=
7078 		    sdebug_unmap_alignment) {
7079 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
	/* xarray of backing stores; IRQ-safe lock since alloc happens atomically */
7084 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7086 		idx = sdebug_add_store();
7093 	pseudo_primary = root_device_register("pseudo_0");
7094 	if (IS_ERR(pseudo_primary)) {
7095 		pr_warn("root_device_register() error\n");
7096 		ret = PTR_ERR(pseudo_primary);
7099 	ret = bus_register(&pseudo_lld_bus);
7101 		pr_warn("bus_register error: %d\n", ret);
7104 	ret = driver_register(&sdebug_driverfs_driver);
7106 		pr_warn("driver_register error: %d\n", ret);
	/* add_host counts hosts still to be added; reset before the loop */
7110 	hosts_to_add = sdebug_add_host;
7111 	sdebug_add_host = 0;
7113 	for (k = 0; k < hosts_to_add; k++) {
		/* first host may reuse the store created above */
7114 		if (want_store && k == 0) {
7115 			ret = sdebug_add_host_helper(idx);
7117 				pr_err("add_host_helper k=%d, error=%d\n",
7122 			ret = sdebug_do_add_host(want_store &&
7123 						 sdebug_per_host_store);
7125 				pr_err("add_host k=%d error=%d\n", k, -ret);
7131 		pr_info("built %d host(s)\n", sdebug_num_hosts);
	/* error unwinding labels (goto targets), reverse registration order */
7136 	bus_unregister(&pseudo_lld_bus);
7138 	root_device_unregister(pseudo_primary);
7140 	sdebug_erase_store(idx, NULL);
7142 	kfree(sdebug_q_arr);
/*
 * Module exit: remove every host, unregister driver/bus/root device, then
 * free all backing stores and the per-queue array.
 * NOTE(review): the loop driving sdebug_do_remove_host() is not fully
 * visible here; k is initialised to the current host count.
 */
7146 static void __exit scsi_debug_exit(void)
7148 	int k = sdebug_num_hosts;
7152 		sdebug_do_remove_host(true);
7154 	driver_unregister(&sdebug_driverfs_driver);
7155 	bus_unregister(&pseudo_lld_bus);
7156 	root_device_unregister(pseudo_primary);
	/* false => do not keep the first store; this is full shutdown */
7158 	sdebug_erase_all_stores(false);
7159 	xa_destroy(per_store_ap);
7160 	kfree(sdebug_q_arr);
/* late init so the SCSI midlayer is up before we register */
7163 device_initcall(scsi_debug_init);
7164 module_exit(scsi_debug_exit);
/*
 * Device-model release callback for a simulated adapter; frees the
 * containing sdebug_host_info (free call not visible in this view).
 */
7166 static void sdebug_release_adapter(struct device *dev)
7168 	struct sdebug_host_info *sdbg_host;
7170 	sdbg_host = to_sdebug_host(dev);
7174 /* idx must be valid, if sip is NULL then it will be obtained using idx */
/* Frees one backing store's buffers and removes it from per_store_ap. */
7175 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7180 	if (xa_empty(per_store_ap))
7182 		sip = xa_load(per_store_ap, idx);
	/* vfree(NULL) is a no-op, so unconditional frees are safe */
7186 	vfree(sip->map_storep);
7187 	vfree(sip->dif_storep);
7189 	xa_erase(per_store_ap, idx);
7193 /* Assume apart_from_first==false only in shutdown case. */
/*
 * Erase every backing store, optionally sparing the first one (used when
 * hosts are removed but the module stays loaded).
 */
7194 static void sdebug_erase_all_stores(bool apart_from_first)
7197 	struct sdeb_store_info *sip = NULL;
7199 	xa_for_each(per_store_ap, idx, sip) {
		/* skip only the first store encountered, then erase the rest */
7200 		if (apart_from_first)
7201 			apart_from_first = false;
7203 			sdebug_erase_store(idx, sip);
7205 	if (apart_from_first)
7206 		sdeb_most_recent_idx = sdeb_first_idx;
7210  * Returns store xarray new element index (idx) if >=0 else negated errno.
7211  * Limit the number of stores to 65536.
/*
 * Allocate a new backing store (ramdisk data, optional DIF tuples, optional
 * LBP bitmap) and register it in per_store_ap.  Error paths fall through to
 * a cleanup label that erases the partially built store (label lines not
 * visible in this view).
 */
7213 static int sdebug_add_store(void)
7217 	unsigned long iflags;
7218 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7219 	struct sdeb_store_info *sip = NULL;
7220 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7222 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	/* xarray lock is IRQ-safe, so the allocation inside must be atomic */
7226 	xa_lock_irqsave(per_store_ap, iflags);
7227 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7228 	if (unlikely(res < 0)) {
7229 		xa_unlock_irqrestore(per_store_ap, iflags);
7231 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
	/* track most recent and first store indices for host (re)use */
7234 	sdeb_most_recent_idx = n_idx;
7235 	if (sdeb_first_idx < 0)
7236 		sdeb_first_idx = n_idx;
7237 	xa_unlock_irqrestore(per_store_ap, iflags);
	/* main user-data ramdisk, zeroed */
7240 	sip->storep = vzalloc(sz);
7242 		pr_err("user data oom\n");
7245 	if (sdebug_num_parts > 0)
7246 		sdebug_build_parts(sip->storep, sz);
7248 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7252 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7253 		sip->dif_storep = vmalloc(dif_size);
7255 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7258 		if (!sip->dif_storep) {
7259 			pr_err("DIX oom\n");
		/* 0xff marks all PI tuples as "unwritten" */
7262 		memset(sip->dif_storep, 0xff, dif_size);
7264 	/* Logical Block Provisioning */
7265 	if (scsi_debug_lbp()) {
7266 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7267 		sip->map_storep = vmalloc(array_size(sizeof(long),
7268 						     BITS_TO_LONGS(map_size)));
7270 		pr_info("%lu provisioning blocks\n", map_size);
7272 		if (!sip->map_storep) {
7273 			pr_err("LBP map oom\n");
7277 		bitmap_zero(sip->map_storep, map_size);
7279 		/* Map first 1KB for partition table */
7280 		if (sdebug_num_parts)
7281 			map_region(sip, 0, 2);
7284 	rwlock_init(&sip->macc_lck);
	/* error cleanup: undo the partially created store */
7287 	sdebug_erase_store((int)n_idx, sip);
7288 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
/*
 * Create one simulated host attached to the backing store at per_host_idx
 * (or the first store when per_host_idx < 0), populate its devices, link it
 * into sdebug_host_list and register it with the driver model.  Returns 0
 * or a negated errno; the tail frees any devices created before failure.
 */
7292 static int sdebug_add_host_helper(int per_host_idx)
7294 	int k, devs_per_host, idx;
7295 	int error = -ENOMEM;
7296 	struct sdebug_host_info *sdbg_host;
7297 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7299 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7302 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* the chosen store is now in use; clear its NOT_IN_USE mark */
7303 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7304 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7305 	sdbg_host->si_idx = idx;
7307 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
	/* pre-create one devinfo per target/LUN combination */
7309 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7310 	for (k = 0; k < devs_per_host; k++) {
7311 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7316 	spin_lock(&sdebug_host_list_lock);
7317 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7318 	spin_unlock(&sdebug_host_list_lock);
7320 	sdbg_host->dev.bus = &pseudo_lld_bus;
7321 	sdbg_host->dev.parent = pseudo_primary;
7322 	sdbg_host->dev.release = &sdebug_release_adapter;
7323 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7325 	error = device_register(&sdbg_host->dev);
	/* on registration failure, unlink before tearing down */
7327 		spin_lock(&sdebug_host_list_lock);
7328 		list_del(&sdbg_host->host_list);
7329 		spin_unlock(&sdebug_host_list_lock);
	/* error path: free all devinfos created above */
7337 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7339 		list_del(&sdbg_devinfo->dev_list);
7340 		kfree(sdbg_devinfo->zstate);
7341 		kfree(sdbg_devinfo);
7344 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
/*
 * Add one host; when mk_new_store is true, create a fresh backing store
 * first, otherwise reuse the most recently created one.
 */
7348 static int sdebug_do_add_host(bool mk_new_store)
7350 	int ph_idx = sdeb_most_recent_idx;
7353 		ph_idx = sdebug_add_store();
7357 	return sdebug_add_host_helper(ph_idx);
/*
 * Remove the most recently added host.  Unless this is module shutdown
 * (the_end), its backing store is marked NOT_IN_USE if no surviving host
 * still references it.  No-op if the host list is empty.
 */
7360 static void sdebug_do_remove_host(bool the_end)
7363 	struct sdebug_host_info *sdbg_host = NULL;
7364 	struct sdebug_host_info *sdbg_host2;
7366 	spin_lock(&sdebug_host_list_lock);
7367 	if (!list_empty(&sdebug_host_list)) {
7368 		sdbg_host = list_entry(sdebug_host_list.prev,
7369 				       struct sdebug_host_info, host_list);
7370 		idx = sdbg_host->si_idx;
	/* keep the store around but flag it reusable if nobody shares it */
7372 	if (!the_end && idx >= 0) {
7375 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7376 			if (sdbg_host2 == sdbg_host)
7378 			if (idx == sdbg_host2->si_idx) {
7384 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7385 			if (idx == sdeb_most_recent_idx)
7386 				--sdeb_most_recent_idx;
7390 		list_del(&sdbg_host->host_list);
7391 	spin_unlock(&sdebug_host_list_lock);
	/* device_unregister triggers sdebug_release_adapter */
7396 	device_unregister(&sdbg_host->dev);
/*
 * scsi_host_template change_queue_depth callback.  Clamps the requested
 * depth to SDEBUG_CANQUEUE, applies it via scsi_change_queue_depth() and
 * returns the resulting depth.  All submit queues are blocked around the
 * change to avoid racing in-flight commands.
 */
7400 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7403 	struct sdebug_dev_info *devip;
7405 	block_unblock_all_queues(true);
7406 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7407 	if (NULL == devip) {
7408 		block_unblock_all_queues(false);
7411 	num_in_q = atomic_read(&devip->num_in_q);
7413 	if (qdepth > SDEBUG_CANQUEUE) {
7414 		qdepth = SDEBUG_CANQUEUE;
7415 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7416 			qdepth, SDEBUG_CANQUEUE);
7420 	if (qdepth != sdev->queue_depth)
7421 		scsi_change_queue_depth(sdev, qdepth);
	/* optional noise for queue-depth changes, gated by opts bit */
7423 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7424 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7425 			    __func__, qdepth, num_in_q);
7427 	block_unblock_all_queues(false);
7428 	return sdev->queue_depth;
/*
 * Every-nth error injection: returns true when this command should be
 * silently dropped (never completed) to simulate a timeout.  A negative
 * every_nth is a one-shot that resets itself to -1.
 */
7431 static bool fake_timeout(struct scsi_cmnd *scp)
7433 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7434 		if (sdebug_every_nth < -1)
7435 			sdebug_every_nth = -1;
7436 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7437 			return true; /* ignore command causing timeout */
7438 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7439 			 scsi_medium_access_command(scp))
7440 			return true; /* time out reads and writes */
7445 /* Response to TUR or media access command when device stopped */
/*
 * Builds NOT READY sense.  stopped_state==2 means "becoming ready": once
 * tur_ms_to_ready has elapsed since device creation the stop state clears;
 * otherwise ASCQ 0x1 is returned, with the remaining milliseconds placed in
 * the sense INFORMATION field for TEST UNIT READY (per T10 20-061r2).
 * Any other stopped state returns ASCQ 0x2 (initializing command required).
 */
7446 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7450 	ktime_t now_ts = ktime_get_boottime();
7451 	struct scsi_device *sdp = scp->device;
7453 	stopped_state = atomic_read(&devip->stopped);
7454 	if (stopped_state == 2) {
7455 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7456 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7457 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7458 				/* tur_ms_to_ready timer extinguished */
7459 				atomic_set(&devip->stopped, 0);
7463 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7465 			sdev_printk(KERN_INFO, sdp,
7466 				    "%s: Not ready: in process of becoming ready\n", my_name);
7467 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7468 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7470 			if (diff_ns <= tur_nanosecs_to_ready)
7471 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7473 				diff_ns = tur_nanosecs_to_ready;
7474 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7475 			do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7476 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7478 			return check_condition_result;
7481 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7483 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7485 	return check_condition_result;
/*
 * blk-mq map_queues callback: split submit_queues between the DEFAULT and
 * POLL hardware queue types, assigning contiguous queue offsets.  Nothing
 * to do with a single hardware queue.
 */
7488 static void sdebug_map_queues(struct Scsi_Host *shost)
7492 	if (shost->nr_hw_queues == 1)
7495 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7496 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7500 		if (i == HCTX_TYPE_DEFAULT)
7501 			map->nr_queues = submit_queues - poll_queues;
7502 		else if (i == HCTX_TYPE_POLL)
7503 			map->nr_queues = poll_queues;
		/* there must always be at least one DEFAULT queue */
7505 		if (!map->nr_queues) {
7506 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7510 		map->queue_offset = qoff;
7511 		blk_mq_map_queues(map);
7513 		qoff += map->nr_queues;
/*
 * blk-mq poll (mq_poll) callback: walk the in-use bitmap of one submit
 * queue and complete every deferred command whose defer type is
 * SDEB_DEFER_POLL and whose completion time has arrived.  Returns the
 * number of commands completed on this call.  The queue lock is dropped
 * around scsi_done() and re-taken afterwards.
 * NOTE(review): several lines of this loop are not visible in this view.
 */
7517 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7520 	bool retiring = false;
7521 	int num_entries = 0;
7522 	unsigned int qc_idx = 0;
7523 	unsigned long iflags;
7524 	ktime_t kt_from_boot = ktime_get_boottime();
7525 	struct sdebug_queue *sqp;
7526 	struct sdebug_queued_cmd *sqcp;
7527 	struct scsi_cmnd *scp;
7528 	struct sdebug_dev_info *devip;
7529 	struct sdebug_defer *sd_dp;
7531 	sqp = sdebug_q_arr + queue_num;
7533 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7535 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7536 	if (qc_idx >= sdebug_max_queue)
	/* iterate over occupied slots; first iteration reuses qc_idx above */
7539 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7542 		if (!test_bit(qc_idx, sqp->in_use_bm))
7545 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7547 		if (qc_idx >= sdebug_max_queue)
7550 		sqcp = &sqp->qc_arr[qc_idx];
7551 		sd_dp = sqcp->sd_dp;
7552 		if (unlikely(!sd_dp))
7555 		if (unlikely(scp == NULL)) {
7556 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7557 			       queue_num, qc_idx, __func__);
		/* only complete commands deferred for polling whose time is up */
7560 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7561 			if (kt_from_boot < sd_dp->cmpl_ts)
7564 		} else		/* ignoring non REQ_POLLED requests */
7566 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7568 			atomic_dec(&devip->num_in_q);
7570 			pr_err("devip=NULL from %s\n", __func__);
7571 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7574 		sqcp->a_cmnd = NULL;
7575 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7576 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7577 			       sqp, queue_num, qc_idx, __func__);
		/* user shrank max_queue: retire slots above the new limit */
7580 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7583 			retval = atomic_read(&retired_max_queue);
7584 			if (qc_idx >= retval) {
7585 				pr_err("index %d too large\n", retval);
7588 			k = find_last_bit(sqp->in_use_bm, retval);
7589 			if ((k < sdebug_max_queue) || (k == retval))
7590 				atomic_set(&retired_max_queue, 0);
7592 				atomic_set(&retired_max_queue, k + 1);
7594 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
		/* drop the lock for the midlayer completion callback */
7595 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7596 		scsi_done(scp); /* callback to mid level */
7598 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7599 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7604 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7606 	if (num_entries > 0)
7607 		atomic_add(num_entries, &sdeb_mq_poll_count);
/*
 * Main queuecommand entry point.  Looks up the opcode (and service action
 * where applicable) in opcode_info_arr, applies error-injection and
 * strictness checks, handles unit attention and not-ready states, then
 * schedules the matching resp_* handler with the configured delay.
 * Returns 0 or a SCSI_MLQUEUE_* busy code.
 * NOTE(review): several lines of this function are not visible in this view.
 */
7611 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7612 				   struct scsi_cmnd *scp)
7615 	struct scsi_device *sdp = scp->device;
7616 	const struct opcode_info_t *oip;
7617 	const struct opcode_info_t *r_oip;
7618 	struct sdebug_dev_info *devip;
7619 	u8 *cmd = scp->cmnd;
7620 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7621 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	/* low 14 bits of the LUN for flat-format range checking */
7624 	u64 lun_index = sdp->lun & 0x3FFF;
7631 	scsi_set_resid(scp, 0);
7632 	if (sdebug_statistics) {
7633 		atomic_inc(&sdebug_cmnd_count);
7634 		inject_now = inject_on_this_cmd();
	/* optional CDB trace: hex-dump up to 32 bytes of the command */
7638 	if (unlikely(sdebug_verbose &&
7639 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7644 		sb = (int)sizeof(b);
7646 			strcpy(b, "too long, over 32 bytes");
7648 			for (k = 0, n = 0; k < len && n < sb; ++k)
7649 				n += scnprintf(b + n, sb - n, "%02x ",
7652 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7653 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7655 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7656 		return SCSI_MLQUEUE_HOST_BUSY;
7657 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7658 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7661 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7662 	oip = &opcode_info_arr[sdeb_i];	/* safe if table consistent */
7663 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7664 	if (unlikely(!devip)) {
7665 		devip = find_build_dev_info(sdp);
7669 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7670 		atomic_set(&sdeb_inject_pending, 1);
7672 	na = oip->num_attached;
7674 	if (na) {	/* multiple commands with this opcode */
		/* match by service action (bytes 1 or 8 per F_SA_LOW/HIGH) */
7676 		if (FF_SA & r_oip->flags) {
7677 			if (F_SA_LOW & oip->flags)
7680 				sa = get_unaligned_be16(cmd + 8);
7681 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7682 				if (opcode == oip->opcode && sa == oip->sa)
7685 		} else {   /* since no service action only check opcode */
7686 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7687 				if (opcode == oip->opcode)
		/* no match: report invalid field at the SA byte, or opcode */
7692 			if (F_SA_LOW & r_oip->flags)
7693 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7694 			else if (F_SA_HIGH & r_oip->flags)
7695 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7697 				mk_sense_invalid_opcode(scp);
7700 	}	/* else (when na==0) we assume the oip is a match */
7702 	if (unlikely(F_INV_OP & flags)) {
7703 		mk_sense_invalid_opcode(scp);
7706 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7708 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7709 				    my_name, opcode, " supported for wlun");
7710 		mk_sense_invalid_opcode(scp);
	/* strict mode: reject CDBs with bits set outside the opcode's mask */
7713 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7717 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7718 			rem = ~oip->len_mask[k] & cmd[k];
7720 				for (j = 7; j >= 0; --j, rem <<= 1) {
7724 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
	/* report pending unit attentions unless the opcode skips them */
7729 	if (unlikely(!(F_SKIP_UA & flags) &&
7730 		     find_first_bit(devip->uas_bm,
7731 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7732 		errsts = make_ua(scp, devip);
7736 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7737 		     atomic_read(&devip->stopped))) {
7738 		errsts = resp_not_ready(scp, devip);
7742 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7744 	if (unlikely(sdebug_every_nth)) {
7745 		if (fake_timeout(scp))
7746 			return 0; /* ignore command: make trouble */
7748 	if (likely(oip->pfp))
7749 		pfp = oip->pfp;	/* calls a resp_* function */
7751 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7754 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7755 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7756 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7757 					    sdebug_ndelay > 10000)) {
7759 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7760 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7761 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7762 		 * For Synchronize Cache want 1/20 of SSU's delay.
7764 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7765 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7767 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7768 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7770 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
	/* check-condition and no-device fallthrough responses */
7773 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7775 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
/*
 * SCSI host template for the simulated adapter.  can_queue/cmd_per_lun are
 * overridden at probe time from sdebug_max_queue (see sdebug_driver_probe).
 */
7778 static struct scsi_host_template sdebug_driver_template = {
7779 	.show_info =		scsi_debug_show_info,
7780 	.write_info =		scsi_debug_write_info,
7781 	.proc_name =		sdebug_proc_name,
7782 	.name =			"SCSI DEBUG",
7783 	.info =			scsi_debug_info,
7784 	.slave_alloc =		scsi_debug_slave_alloc,
7785 	.slave_configure =	scsi_debug_slave_configure,
7786 	.slave_destroy =	scsi_debug_slave_destroy,
7787 	.ioctl =		scsi_debug_ioctl,
7788 	.queuecommand =		scsi_debug_queuecommand,
7789 	.change_queue_depth =	sdebug_change_qdepth,
7790 	.map_queues =		sdebug_map_queues,
7791 	.mq_poll =		sdebug_blk_mq_poll,
7792 	.eh_abort_handler =	scsi_debug_abort,
7793 	.eh_device_reset_handler = scsi_debug_device_reset,
7794 	.eh_target_reset_handler = scsi_debug_target_reset,
7795 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7796 	.eh_host_reset_handler = scsi_debug_host_reset,
7797 	.can_queue =		SDEBUG_CANQUEUE,
7799 	.sg_tablesize =		SG_MAX_SEGMENTS,
7800 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7802 	.max_segment_size =	-1U,
7803 	.module =		THIS_MODULE,
7804 	.track_queue_depth =	1,
/*
 * Bus probe callback: allocate and configure a Scsi_Host for one simulated
 * adapter (queue counts, poll queues, DIF/DIX protection capabilities,
 * guard type), then add and scan it.  Returns 0 or a negated errno.
 * NOTE(review): several lines of this function are not visible in this view.
 */
7807 static int sdebug_driver_probe(struct device *dev)
7810 	struct sdebug_host_info *sdbg_host;
7811 	struct Scsi_Host *hpnt;
7814 	sdbg_host = to_sdebug_host(dev);
	/* template fields overridden from current module parameters */
7816 	sdebug_driver_template.can_queue = sdebug_max_queue;
7817 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7818 	if (!sdebug_clustering)
7819 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7821 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7823 		pr_err("scsi_host_alloc failed\n");
7827 	if (submit_queues > nr_cpu_ids) {
7828 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7829 			my_name, submit_queues, nr_cpu_ids);
7830 		submit_queues = nr_cpu_ids;
7833 	 * Decide whether to tell scsi subsystem that we want mq. The
7834 	 * following should give the same answer for each host.
7836 	hpnt->nr_hw_queues = submit_queues;
7837 	if (sdebug_host_max_queue)
7838 		hpnt->host_tagset = 1;
7840 	/* poll queues are possible for nr_hw_queues > 1 */
7841 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7842 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7843 			my_name, poll_queues, hpnt->nr_hw_queues);
7848 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7849 	 * left over for non-polled I/O.
7850 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7852 	if (poll_queues >= submit_queues) {
7853 		if (submit_queues < 3)
7854 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7856 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7857 				my_name, submit_queues - 1);
	/* link host <-> sdbg_host via the hostdata area */
7863 	sdbg_host->shost = hpnt;
7864 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7865 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7866 		hpnt->max_id = sdebug_num_tgts + 1;
7868 		hpnt->max_id = sdebug_num_tgts;
7869 	/* = sdebug_max_luns; */
7870 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	/* build SHOST_DIF/DIX protection mask from the sdebug_dif type */
7874 	switch (sdebug_dif) {
7876 	case T10_PI_TYPE1_PROTECTION:
7877 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7879 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7882 	case T10_PI_TYPE2_PROTECTION:
7883 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7885 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7888 	case T10_PI_TYPE3_PROTECTION:
7889 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7891 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7896 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7900 	scsi_host_set_prot(hpnt, hprot);
7902 	if (have_dif_prot || sdebug_dix)
7903 		pr_info("host protection%s%s%s%s%s%s%s\n",
7904 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7905 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7906 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7907 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7908 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7909 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7910 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
	/* guard=1 selects IP checksum, otherwise T10 CRC */
7912 	if (sdebug_guard == 1)
7913 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7915 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7917 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7918 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7919 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7920 		sdebug_statistics = true;
7921 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7923 		pr_err("scsi_add_host failed\n");
7925 		scsi_host_put(hpnt);
7927 		scsi_scan_host(hpnt);
/*
 * Bus remove callback: detach the Scsi_Host, free every per-device info
 * structure for this adapter, then drop the final host reference.
 */
7933 static void sdebug_driver_remove(struct device *dev)
7935 	struct sdebug_host_info *sdbg_host;
7936 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7938 	sdbg_host = to_sdebug_host(dev);
7940 	scsi_remove_host(sdbg_host->shost);
7942 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7944 		list_del(&sdbg_devinfo->dev_list);
7945 		kfree(sdbg_devinfo->zstate);
7946 		kfree(sdbg_devinfo);
7949 	scsi_host_put(sdbg_host->shost);
/*
 * Bus match callback: the pseudo bus has a single driver, so matching is
 * unconditional (return statement not visible in this view).
 */
7952 static int pseudo_lld_bus_match(struct device *dev,
7953 				struct device_driver *dev_driver)
7958 static struct bus_type pseudo_lld_bus = {
7960 .match = pseudo_lld_bus_match,
7961 .probe = sdebug_driver_probe,
7962 .remove = sdebug_driver_remove,
7963 .drv_groups = sdebug_drv_groups,