// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
45 #include <net/checksum.h>
47 #include <asm/unaligned.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
65 #define MY_NAME "scsi_debug"
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST 1
109 #define DEF_NUM_TGTS 1
110 #define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0).  That is 1 device.
 */
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT 0
118 #define DEF_DEV_SIZE_MB 8
119 #define DEF_ZBC_DEV_SIZE_MB 128
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE 0
124 #define DEF_EVERY_NTH 0
125 #define DEF_FAKE_RW 0
127 #define DEF_HOST_LOCK 0
130 #define DEF_LBPWS10 0
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0 0
135 #define DEF_NUM_PARTS 0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB 0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB 128
161 #define DEF_ZBC_MAX_OPEN_ZONES 8
162 #define DEF_ZBC_NR_CONV_ZONES 1
164 #define SDEBUG_LUN_0_VAL 0
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE 1
168 #define SDEBUG_OPT_MEDIUM_ERR 2
169 #define SDEBUG_OPT_TIMEOUT 4
170 #define SDEBUG_OPT_RECOVERED_ERR 8
171 #define SDEBUG_OPT_TRANSPORT_ERR 16
172 #define SDEBUG_OPT_DIF_ERR 32
173 #define SDEBUG_OPT_DIX_ERR 64
174 #define SDEBUG_OPT_MAC_TIMEOUT 128
175 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
176 #define SDEBUG_OPT_Q_NOISE 0x200
177 #define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
178 #define SDEBUG_OPT_RARE_TSF 0x800
179 #define SDEBUG_OPT_N_WCE 0x1000
180 #define SDEBUG_OPT_RESET_NOISE 0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
182 #define SDEBUG_OPT_HOST_BUSY 0x8000
183 #define SDEBUG_OPT_CMD_ABORT 0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 SDEBUG_OPT_TRANSPORT_ERR | \
188 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 SDEBUG_OPT_SHORT_TRANSFER | \
190 SDEBUG_OPT_HOST_BUSY | \
191 SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196 * priority order. In the subset implemented here lower numbers have higher
197 * priority. The UA numbers should be a sequence starting from 0 with
198 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
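/*
 * Illustration only (not driver code): because lower UA numbers have higher
 * priority, a device's pending-UA bitmap can be serviced with find_first_bit()
 * as make_ua() does further down, e.g.:
 *
 *	set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
 *	set_bit(SDEBUG_UA_LUNS_CHANGED, devip->uas_bm);
 *	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *	// k == SDEBUG_UA_BUS_RESET (2), reported before SDEBUG_UA_LUNS_CHANGED (5)
 */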
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210 * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
221 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
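/*
 * Worked example: on a 64-bit build SDEBUG_CANQUEUE_WORDS is 3 words of
 * BITS_PER_LONG = 64 bits, so SDEBUG_CANQUEUE = 3 * 64 = 192 commands may be
 * queued per submit queue, and DEF_CMD_PER_LUN defaults to that same 192; a
 * 32-bit build gives 3 * 32 = 96.
 */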
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN 1 /* Data-in command (e.g. READ) */
227 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
230 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
233 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
236 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
238 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
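/*
 * Example of how these combinations are used (see the opcode table below):
 * INQUIRY is registered with FF_RESPOND | F_D_IN, so it is allowed for the
 * REPORT LUNS well-known LUN, bypasses pending Unit Attentions and may
 * override the configured command delay.
 */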
247 #define SDEBUG_MAX_PARTS 4
249 #define SDEBUG_MAX_CMD_LEN 32
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
253 /* Zone types (zbcr05 table 25) */
258 /* ZBC_ZTYPE_SOBR = 0x4, */
262 /* enumeration names taken from table 26, zbcr05 */
264 ZBC_NOT_WRITE_POINTER = 0x0,
266 ZC2_IMPLICIT_OPEN = 0x2,
267 ZC3_EXPLICIT_OPEN = 0x3,
274 struct sdeb_zone_state { /* ZBC: per zone state */
275 enum sdebug_z_type z_type;
276 enum sdebug_z_cond z_cond;
277 bool z_non_seq_resource;
283 struct sdebug_dev_info {
284 struct list_head dev_list;
285 unsigned int channel;
289 struct sdebug_host_info *sdbg_host;
290 unsigned long uas_bm[1];
292 atomic_t stopped; /* 1: by SSU, 2: device start */
295 /* For ZBC devices */
296 enum blk_zoned_model zmodel;
299 unsigned int zsize_shift;
300 unsigned int nr_zones;
301 unsigned int nr_conv_zones;
302 unsigned int nr_seq_zones;
303 unsigned int nr_imp_open;
304 unsigned int nr_exp_open;
305 unsigned int nr_closed;
306 unsigned int max_open;
307 ktime_t create_ts; /* time since bootup that this device was created */
308 struct sdeb_zone_state *zstate;
311 struct sdebug_host_info {
312 struct list_head host_list;
313 int si_idx; /* sdeb_store_info (per host) xarray index */
314 struct Scsi_Host *shost;
316 struct list_head dev_info_list;
319 /* There is an xarray of pointers to this struct's objects, one per host */
320 struct sdeb_store_info {
321 rwlock_t macc_lck; /* for atomic media access on this store */
322 u8 *storep; /* user data storage (ram) */
323 struct t10_pi_tuple *dif_storep; /* protection info */
324 void *map_storep; /* provisioning map */
327 #define to_sdebug_host(d) \
328 container_of(d, struct sdebug_host_info, dev)
330 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
331 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
333 struct sdebug_defer {
335 struct execute_work ew;
336 ktime_t cmpl_ts;/* time since boot to complete this cmd */
337 int sqa_idx; /* index of sdebug_queue array */
338 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
339 int hc_idx; /* hostwide tag index */
344 bool aborted; /* true when blk_abort_request() already called */
345 enum sdeb_defer_type defer_t;
348 struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
352 struct sdebug_defer *sd_dp;
353 struct scsi_cmnd *a_cmnd;
356 struct sdebug_queue {
357 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
358 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
360 atomic_t blocked; /* to temporarily stop more being queued */
363 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
364 static atomic_t sdebug_completions; /* count of deferred completions */
365 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
366 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
367 static atomic_t sdeb_inject_pending;
368 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
370 struct opcode_info_t {
371 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
372 /* for terminating element */
373 u8 opcode; /* if num_attached > 0, preferred */
374 u16 sa; /* service action */
375 u32 flags; /* OR-ed set of SDEB_F_* */
376 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
377 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
378 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
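/*
 * Example reading of a len_mask[] entry (illustrative): the INQUIRY row in
 * opcode_info_arr[] below uses {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}.
 * len_mask[0] = 6 gives the cdb length; 0xe3 is the mask of bits that may be
 * set in cdb[1] and 0xc7 the mask for the control byte cdb[5]. The dispatch
 * code can compare an incoming cdb against these masks (presumably what the
 * "strict" parameter enables) and report an illegal request for bits set
 * outside a mask.
 */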
382 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
383 enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
387 SDEB_I_REQUEST_SENSE = 3,
388 SDEB_I_TEST_UNIT_READY = 4,
389 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
390 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
391 SDEB_I_LOG_SENSE = 7,
392 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
393 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
394 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
395 SDEB_I_START_STOP = 11,
396 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
397 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
398 SDEB_I_MAINT_IN = 14,
399 SDEB_I_MAINT_OUT = 15,
400 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
401 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
402 SDEB_I_RESERVE = 18, /* 6, 10 */
403 SDEB_I_RELEASE = 19, /* 6, 10 */
404 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
405 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
406 SDEB_I_ATA_PT = 22, /* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
410 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
411 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
412 SDEB_I_COMP_WRITE = 28,
413 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
414 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
415 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
420 static const unsigned char opcode_ind_arr[256] = {
421 /* 0x0; 0x0->0x1f: 6 byte cdbs */
422 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
424 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
425 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
427 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
428 SDEB_I_ALLOW_REMOVAL, 0,
429 /* 0x20; 0x20->0x3f: 10 byte cdbs */
430 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
431 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
432 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
433 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
434 /* 0x40; 0x40->0x5f: 10 byte cdbs */
435 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
436 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
437 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
439 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
440 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
441 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
442 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
443 0, SDEB_I_VARIABLE_LEN,
444 /* 0x80; 0x80->0x9f: 16 byte cdbs */
445 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
446 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
447 0, 0, 0, SDEB_I_VERIFY,
448 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
449 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
450 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
451 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
452 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
453 SDEB_I_MAINT_OUT, 0, 0, 0,
454 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
455 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
456 0, 0, 0, 0, 0, 0, 0, 0,
457 0, 0, 0, 0, 0, 0, 0, 0,
458 /* 0xc0; 0xc0->0xff: vendor specific */
459 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
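/*
 * Example lookup (illustrative): a READ(10) cdb starts with opcode 0x28, so
 * opcode_ind_arr[0x28] is SDEB_I_READ; that index selects the READ entry in
 * opcode_info_arr[] whose read_iarr overflow array carries the READ(10)
 * variant, and both resolve to the resp_read_dt0() handler.
 */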
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
471 #define SDEG_RES_IMMED_MASK 0x40000000
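/*
 * Illustrative sketch only (the IMMED-bit test below is hypothetical, not
 * lifted from a real handler): a response function that wants the mid-level
 * completion to happen without the configured delay can OR the mask into its
 * normal return value, e.g.:
 *
 *	if (cmd[1] & 0x1)	// cdb has an IMMED bit set
 *		return res | SDEG_RES_IMMED_MASK;
 *	return res;
 */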
473 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int sdebug_do_add_host(bool mk_new_store);
504 static int sdebug_add_host_helper(int per_host_idx);
505 static void sdebug_do_remove_host(bool the_end);
506 static int sdebug_add_store(void);
507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
508 static void sdebug_erase_all_stores(bool apart_from_first);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
515 static const struct opcode_info_t msense_iarr[] = {
516 {0, 0x1a, 0, F_D_IN, NULL, NULL,
517 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
520 static const struct opcode_info_t mselect_iarr[] = {
521 {0, 0x15, 0, F_D_OUT, NULL, NULL,
522 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 static const struct opcode_info_t read_iarr[] = {
526 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
529 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
536 static const struct opcode_info_t write_iarr[] = {
537 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
538 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
540 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
541 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
543 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
544 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 0xbf, 0xc7, 0, 0, 0, 0} },
548 static const struct opcode_info_t verify_iarr[] = {
549 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
560 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
561 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
564 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
569 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
570 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
578 static const struct opcode_info_t write_same_iarr[] = {
579 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
584 static const struct opcode_info_t reserve_iarr[] = {
585 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
586 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
589 static const struct opcode_info_t release_iarr[] = {
590 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
591 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
606 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
607 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
610 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
613 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
618 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
619 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626 * plus the terminating elements for logic that scans this table such as
627 * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
630 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
631 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 0, 0} }, /* REPORT LUNS */
637 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
643 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
644 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
646 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
647 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
649 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
651 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
652 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
654 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
656 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
658 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 resp_write_dt0, write_iarr, /* WRITE(16) */
660 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
671 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
673 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 0xff, 0, 0xc7, 0, 0, 0, 0} },
676 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
680 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
684 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
686 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
688 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
690 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 NULL, release_iarr, /* RELEASE(10) <no response function> */
692 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
695 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
702 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
706 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 0, 0, 0, 0} }, /* WRITE_BUFFER */
709 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
711 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
713 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 resp_sync_cache, sync_cache_iarr,
715 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
717 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
720 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 resp_pre_fetch, pre_fetch_iarr,
722 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 0, 0, 0, 0} }, /* PRE-FETCH (10) */
726 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
728 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
732 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
735 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue; /* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
758 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
759 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
760 static int sdebug_no_uld;
761 static int sdebug_num_parts = DEF_NUM_PARTS;
762 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
763 static int sdebug_opt_blks = DEF_OPT_BLKS;
764 static int sdebug_opts = DEF_OPTS;
765 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
766 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
767 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
768 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
769 static int sdebug_sector_size = DEF_SECTOR_SIZE;
770 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
771 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
772 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
773 static unsigned int sdebug_lbpu = DEF_LBPU;
774 static unsigned int sdebug_lbpws = DEF_LBPWS;
775 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
776 static unsigned int sdebug_lbprz = DEF_LBPRZ;
777 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
778 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
779 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
780 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
781 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
782 static int sdebug_uuid_ctl = DEF_UUID_CTL;
783 static bool sdebug_random = DEF_RANDOM;
784 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
785 static bool sdebug_removable = DEF_REMOVABLE;
786 static bool sdebug_clustering;
787 static bool sdebug_host_lock = DEF_HOST_LOCK;
788 static bool sdebug_strict = DEF_STRICT;
789 static bool sdebug_any_injecting_opt;
790 static bool sdebug_no_rwlock;
791 static bool sdebug_verbose;
792 static bool have_dif_prot;
793 static bool write_since_sync;
794 static bool sdebug_statistics = DEF_STATISTICS;
795 static bool sdebug_wp;
796 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
797 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
798 static char *sdeb_zbc_model_s;
800 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
801 SAM_LUN_AM_FLAT = 0x1,
802 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
803 SAM_LUN_AM_EXTENDED = 0x3};
804 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
805 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
807 static unsigned int sdebug_store_sectors;
808 static sector_t sdebug_capacity; /* in sectors */
810 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
811 may still need them */
812 static int sdebug_heads; /* heads per disk */
813 static int sdebug_cylinders_per; /* cylinders per surface */
814 static int sdebug_sectors_per; /* sectors per cylinder */
816 static LIST_HEAD(sdebug_host_list);
817 static DEFINE_SPINLOCK(sdebug_host_list_lock);
819 static struct xarray per_store_arr;
820 static struct xarray *per_store_ap = &per_store_arr;
821 static int sdeb_first_idx = -1; /* invalid index ==> none created */
822 static int sdeb_most_recent_idx = -1;
823 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
825 static unsigned long map_size;
826 static int num_aborts;
827 static int num_dev_resets;
828 static int num_target_resets;
829 static int num_bus_resets;
830 static int num_host_resets;
831 static int dix_writes;
832 static int dix_reads;
833 static int dif_errors;
835 /* ZBC global data */
836 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
837 static int sdeb_zbc_zone_cap_mb;
838 static int sdeb_zbc_zone_size_mb;
839 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
840 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
842 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */
844 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
846 static DEFINE_RWLOCK(atomic_rw);
847 static DEFINE_RWLOCK(atomic_rw2);
849 static rwlock_t *ramdisk_lck_a[2];
851 static char sdebug_proc_name[] = MY_NAME;
852 static const char *my_name = MY_NAME;
854 static struct bus_type pseudo_lld_bus;
856 static struct device_driver sdebug_driverfs_driver = {
857 .name = sdebug_proc_name,
858 .bus = &pseudo_lld_bus,
861 static const int check_condition_result =
862 SAM_STAT_CHECK_CONDITION;
864 static const int illegal_condition_result =
865 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
867 static const int device_qfull_result =
868 (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
870 static const int condition_met_result = SAM_STAT_CONDITION_MET;
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
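/*
 * Usage note (assumes the usual scsi_debug module parameter names backing
 * sdebug_lbpu/lbpws/lbpws10 and sdebug_fake_rw): the provisioning paths
 * guarded by scsi_debug_lbp() above are only exercised with something like
 *
 *	modprobe scsi_debug lbpu=1 lbpws=1 fake_rw=0
 *
 * i.e. at least one lbp* parameter set while fake_rw stays 0.
 */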
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		lsip = xa_load(per_store_ap, 0);	/* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}
923 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
925 /* Set in_bit to -1 to indicate no bit position of invalid field */
926 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
927 enum sdeb_cmd_data c_d,
928 int in_byte, int in_bit)
930 unsigned char *sbuff;
934 sbuff = scp->sense_buffer;
936 sdev_printk(KERN_ERR, scp->device,
937 "%s: sense_buffer is NULL\n", __func__);
940 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
941 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
942 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
943 memset(sks, 0, sizeof(sks));
949 sks[0] |= 0x7 & in_bit;
951 put_unaligned_be16(in_byte, sks + 1);
957 memcpy(sbuff + sl + 4, sks, 3);
959 memcpy(sbuff + 15, sks, 3);
961 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
962 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
963 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
1078 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1082 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1083 if (k != SDEBUG_NUM_UAS) {
1084 const char *cp = NULL;
1088 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1089 POWER_ON_RESET_ASCQ);
1091 cp = "power on reset";
1093 case SDEBUG_UA_POOCCUR:
1094 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1095 POWER_ON_OCCURRED_ASCQ);
1097 cp = "power on occurred";
1099 case SDEBUG_UA_BUS_RESET:
1100 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1105 case SDEBUG_UA_MODE_CHANGED:
1106 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1109 cp = "mode parameters changed";
1111 case SDEBUG_UA_CAPACITY_CHANGED:
1112 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1113 CAPACITY_CHANGED_ASCQ);
1115 cp = "capacity data changed";
1117 case SDEBUG_UA_MICROCODE_CHANGED:
1118 mk_sense_buffer(scp, UNIT_ATTENTION,
1120 MICROCODE_CHANGED_ASCQ);
1122 cp = "microcode has been changed";
1124 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1125 mk_sense_buffer(scp, UNIT_ATTENTION,
1127 MICROCODE_CHANGED_WO_RESET_ASCQ);
1129 cp = "microcode has been changed without reset";
1131 case SDEBUG_UA_LUNS_CHANGED:
		/*
		 * SPC-3 behavior is to report a UNIT ATTENTION with
		 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
		 * on the target, until a REPORT LUNS command is
		 * received.  SPC-4 behavior is to report it only once.
		 * NOTE: sdebug_scsi_level does not use the same
		 * values as struct scsi_device->scsi_level.
		 */
1140 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1141 clear_luns_changed_on_target(devip);
1142 mk_sense_buffer(scp, UNIT_ATTENTION,
1146 cp = "reported luns data has changed";
1149 pr_warn("unexpected unit attention code=%d\n", k);
1154 clear_bit(k, devip->uas_bm);
1156 sdev_printk(KERN_INFO, scp->device,
1157 "%s reports: Unit attention: %s\n",
1159 return check_condition_result;
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;
	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;
	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
1225 static char sdebug_inq_vendor_id[9] = "Linux ";
1226 static char sdebug_inq_product_id[17] = "scsi_debug ";
1227 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1228 /* Use some locally assigned NAAs for SAS addresses. */
1229 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1230 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1231 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1233 /* Device identification VPD page. Returns number of bytes placed in arr */
1234 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1235 int target_dev_id, int dev_id_num,
1236 const char *dev_id_str, int dev_id_str_len,
1237 const uuid_t *lu_name)
1242 port_a = target_dev_id + 1;
1243 /* T10 vendor identifier field format (faked) */
1244 arr[0] = 0x2; /* ASCII */
1247 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1248 memcpy(&arr[12], sdebug_inq_product_id, 16);
1249 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1250 num = 8 + 16 + dev_id_str_len;
1253 if (dev_id_num >= 0) {
1254 if (sdebug_uuid_ctl) {
1255 /* Locally assigned UUID */
1256 arr[num++] = 0x1; /* binary (not necessarily sas) */
1257 arr[num++] = 0xa; /* PIV=0, lu, naa */
1260 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1262 memcpy(arr + num, lu_name, 16);
1265 /* NAA-3, Logical unit identifier (binary) */
1266 arr[num++] = 0x1; /* binary (not necessarily sas) */
1267 arr[num++] = 0x3; /* PIV=0, lu, naa */
1270 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1273 /* Target relative port number */
1274 arr[num++] = 0x61; /* proto=sas, binary */
1275 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1276 arr[num++] = 0x0; /* reserved */
1277 arr[num++] = 0x4; /* length */
1278 arr[num++] = 0x0; /* reserved */
1279 arr[num++] = 0x0; /* reserved */
1281 arr[num++] = 0x1; /* relative port A */
1283 /* NAA-3, Target port identifier */
1284 arr[num++] = 0x61; /* proto=sas, binary */
1285 arr[num++] = 0x93; /* piv=1, target port, naa */
1288 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1290 /* NAA-3, Target port group identifier */
1291 arr[num++] = 0x61; /* proto=sas, binary */
1292 arr[num++] = 0x95; /* piv=1, target port group id */
1297 put_unaligned_be16(port_group_id, arr + num);
1299 /* NAA-3, Target device identifier */
1300 arr[num++] = 0x61; /* proto=sas, binary */
1301 arr[num++] = 0xa3; /* piv=1, target device, naa */
1304 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1306 /* SCSI name string: Target device identifier */
1307 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1308 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1311 memcpy(arr + num, "naa.32222220", 12);
1313 snprintf(b, sizeof(b), "%08X", target_dev_id);
1314 memcpy(arr + num, b, 8);
1316 memset(arr + num, 0, 4);
1321 static unsigned char vpd84_data[] = {
1322 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1323 0x22,0x22,0x22,0x0,0xbb,0x1,
1324 0x22,0x22,0x22,0x0,0xbb,0x2,
1327 /* Software interface identification VPD page */
1328 static int inquiry_vpd_84(unsigned char *arr)
1330 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1331 return sizeof(vpd84_data);
1334 /* Management network addresses VPD page */
1335 static int inquiry_vpd_85(unsigned char *arr)
1338 const char *na1 = "https://www.kernel.org/config";
1339 const char *na2 = "http://www.kernel.org/log";
1342 arr[num++] = 0x1; /* lu, storage config */
1343 arr[num++] = 0x0; /* reserved */
1348 plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
1350 memcpy(arr + num, na1, olen);
1351 memset(arr + num + olen, 0, plen - olen);
1354 arr[num++] = 0x4; /* lu, logging */
1355 arr[num++] = 0x0; /* reserved */
1360 plen = ((plen / 4) + 1) * 4;
1361 arr[num++] = plen; /* length, null terminated, padded */
1362 memcpy(arr + num, na2, olen);
1363 memset(arr + num + olen, 0, plen - olen);
1369 /* SCSI ports VPD page */
1370 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1375 port_a = target_dev_id + 1;
1376 port_b = port_a + 1;
1377 arr[num++] = 0x0; /* reserved */
1378 arr[num++] = 0x0; /* reserved */
1380 arr[num++] = 0x1; /* relative port 1 (primary) */
1381 memset(arr + num, 0, 6);
1384 arr[num++] = 12; /* length tp descriptor */
1385 /* naa-5 target port identifier (A) */
1386 arr[num++] = 0x61; /* proto=sas, binary */
1387 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1388 arr[num++] = 0x0; /* reserved */
1389 arr[num++] = 0x8; /* length */
1390 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1392 arr[num++] = 0x0; /* reserved */
1393 arr[num++] = 0x0; /* reserved */
1395 arr[num++] = 0x2; /* relative port 2 (secondary) */
1396 memset(arr + num, 0, 6);
1399 arr[num++] = 12; /* length tp descriptor */
1400 /* naa-5 target port identifier (B) */
1401 arr[num++] = 0x61; /* proto=sas, binary */
1402 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1403 arr[num++] = 0x0; /* reserved */
1404 arr[num++] = 0x8; /* length */
1405 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1412 static unsigned char vpd89_data[] = {
1413 /* from 4th byte */ 0,0,0,0,
1414 'l','i','n','u','x',' ',' ',' ',
1415 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1417 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1419 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1420 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1421 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1422 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1426 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1428 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1429 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1430 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1432 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1433 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1434 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1439 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1440 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1441 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1456 /* ATA Information VPD page */
1457 static int inquiry_vpd_89(unsigned char *arr)
1459 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1460 return sizeof(vpd89_data);
1464 static unsigned char vpdb0_data[] = {
1465 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1466 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1471 /* Block limits VPD page (SBC-3) */
1472 static int inquiry_vpd_b0(unsigned char *arr)
1476 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1478 /* Optimal transfer length granularity */
1479 if (sdebug_opt_xferlen_exp != 0 &&
1480 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1481 gran = 1 << sdebug_opt_xferlen_exp;
1483 gran = 1 << sdebug_physblk_exp;
1484 put_unaligned_be16(gran, arr + 2);
1486 /* Maximum Transfer Length */
1487 if (sdebug_store_sectors > 0x400)
1488 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1490 /* Optimal Transfer Length */
1491 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1494 /* Maximum Unmap LBA Count */
1495 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1497 /* Maximum Unmap Block Descriptor Count */
1498 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1501 /* Unmap Granularity Alignment */
1502 if (sdebug_unmap_alignment) {
1503 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1504 arr[28] |= 0x80; /* UGAVALID */
1507 /* Optimal Unmap Granularity */
1508 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1510 /* Maximum WRITE SAME Length */
1511 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
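/*
 * Worked example with the driver defaults (opt_blks=1024, physblk_exp=0,
 * opt_xferlen_exp=0): gran = 1 << 0 = 1, so the OPTIMAL TRANSFER LENGTH
 * GRANULARITY field reports 1 block while the OPTIMAL TRANSFER LENGTH field
 * reports 1024 blocks.
 */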
1516 /* Block device characteristics VPD page (SBC-3) */
1517 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1519 memset(arr, 0, 0x3c);
1521 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1523 arr[3] = 5; /* less than 1.8" */
1524 if (devip->zmodel == BLK_ZONED_HA)
1525 arr[4] = 1 << 4; /* zoned field = 01b */
1530 /* Logical block provisioning VPD page (SBC-4) */
1531 static int inquiry_vpd_b2(unsigned char *arr)
1533 memset(arr, 0, 0x4);
1534 arr[0] = 0; /* threshold exponent */
1541 if (sdebug_lbprz && scsi_debug_lbp())
1542 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1543 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1544 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1545 /* threshold_percentage=0 */
1549 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1550 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1552 memset(arr, 0, 0x3c);
1553 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
1560 put_unaligned_be32(0xffffffff, &arr[4]);
1561 put_unaligned_be32(0xffffffff, &arr[8]);
1562 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1563 put_unaligned_be32(devip->max_open, &arr[12]);
1565 put_unaligned_be32(0xffffffff, &arr[12]);
1566 if (devip->zcap < devip->zsize) {
1567 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1568 put_unaligned_be64(devip->zsize, &arr[20]);
1575 #define SDEBUG_LONG_INQ_SZ 96
1576 #define SDEBUG_MAX_INQ_ARR_SZ 584
1578 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1580 unsigned char pq_pdt;
1582 unsigned char *cmd = scp->cmnd;
1585 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1587 alloc_len = get_unaligned_be16(cmd + 3);
1588 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1590 return DID_REQUEUE << 16;
1591 is_disk = (sdebug_ptype == TYPE_DISK);
1592 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1593 is_disk_zbc = (is_disk || is_zbc);
1594 have_wlun = scsi_is_wlun(scp->device->lun);
1596 pq_pdt = TYPE_WLUN; /* present, wlun */
1597 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1598 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1600 pq_pdt = (sdebug_ptype & 0x1f);
1602 if (0x2 & cmd[1]) { /* CMDDT bit set */
1603 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1605 return check_condition_result;
1606 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1607 int lu_id_num, port_group_id, target_dev_id;
1610 int host_no = devip->sdbg_host->shost->host_no;
1612 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1613 (devip->channel & 0x7f);
1614 if (sdebug_vpd_use_hostno == 0)
1616 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1617 (devip->target * 1000) + devip->lun);
1618 target_dev_id = ((host_no + 1) * 2000) +
1619 (devip->target * 1000) - 3;
1620 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1621 if (0 == cmd[2]) { /* supported vital product data pages */
1622 arr[1] = cmd[2]; /*sanity */
1624 arr[n++] = 0x0; /* this page */
1625 arr[n++] = 0x80; /* unit serial number */
1626 arr[n++] = 0x83; /* device identification */
1627 arr[n++] = 0x84; /* software interface ident. */
1628 arr[n++] = 0x85; /* management network addresses */
1629 arr[n++] = 0x86; /* extended inquiry */
1630 arr[n++] = 0x87; /* mode page policy */
1631 arr[n++] = 0x88; /* SCSI ports */
1632 if (is_disk_zbc) { /* SBC or ZBC */
1633 arr[n++] = 0x89; /* ATA information */
1634 arr[n++] = 0xb0; /* Block limits */
1635 arr[n++] = 0xb1; /* Block characteristics */
1637 arr[n++] = 0xb2; /* LB Provisioning */
1639 arr[n++] = 0xb6; /* ZB dev. char. */
1641 arr[3] = n - 4; /* number of supported VPD pages */
1642 } else if (0x80 == cmd[2]) { /* unit serial number */
1643 arr[1] = cmd[2]; /*sanity */
1645 memcpy(&arr[4], lu_id_str, len);
1646 } else if (0x83 == cmd[2]) { /* device identification */
1647 arr[1] = cmd[2]; /*sanity */
1648 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1649 target_dev_id, lu_id_num,
1652 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1653 arr[1] = cmd[2]; /*sanity */
1654 arr[3] = inquiry_vpd_84(&arr[4]);
1655 } else if (0x85 == cmd[2]) { /* Management network addresses */
1656 arr[1] = cmd[2]; /*sanity */
1657 arr[3] = inquiry_vpd_85(&arr[4]);
1658 } else if (0x86 == cmd[2]) { /* extended inquiry */
1659 arr[1] = cmd[2]; /*sanity */
1660 arr[3] = 0x3c; /* number of following entries */
1661 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1662 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1663 else if (have_dif_prot)
1664 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1666 arr[4] = 0x0; /* no protection stuff */
1667 arr[5] = 0x7; /* head of q, ordered + simple q's */
1668 } else if (0x87 == cmd[2]) { /* mode page policy */
1669 arr[1] = cmd[2]; /*sanity */
1670 arr[3] = 0x8; /* number of following entries */
1671 arr[4] = 0x2; /* disconnect-reconnect mp */
1672 arr[6] = 0x80; /* mlus, shared */
1673 arr[8] = 0x18; /* protocol specific lu */
1674 arr[10] = 0x82; /* mlus, per initiator port */
1675 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1676 arr[1] = cmd[2]; /*sanity */
1677 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1678 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1679 arr[1] = cmd[2]; /*sanity */
1680 n = inquiry_vpd_89(&arr[4]);
1681 put_unaligned_be16(n, arr + 2);
1682 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1683 arr[1] = cmd[2]; /*sanity */
1684 arr[3] = inquiry_vpd_b0(&arr[4]);
1685 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1686 arr[1] = cmd[2]; /*sanity */
1687 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1688 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1689 arr[1] = cmd[2]; /*sanity */
1690 arr[3] = inquiry_vpd_b2(&arr[4]);
1691 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1692 arr[1] = cmd[2]; /*sanity */
1693 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1695 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1697 return check_condition_result;
1699 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1700 ret = fill_from_dev_buffer(scp, arr,
1701 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1705 /* drops through here for a standard inquiry */
1706 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1707 arr[2] = sdebug_scsi_level;
1708 arr[3] = 2; /* response_data_format==2 */
1709 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1710 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1711 if (sdebug_vpd_use_hostno == 0)
1712 arr[5] |= 0x10; /* claim: implicit TPGS */
1713 arr[6] = 0x10; /* claim: MultiP */
1714 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1715 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1716 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1717 memcpy(&arr[16], sdebug_inq_product_id, 16);
1718 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1719 /* Use Vendor Specific area to place driver date in ASCII */
1720 memcpy(&arr[36], sdebug_version_date, 8);
1721 /* version descriptors (2 bytes each) follow */
1722 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1723 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1725 if (is_disk) { /* SBC-4 no version claimed */
1726 put_unaligned_be16(0x600, arr + n);
1728 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1729 put_unaligned_be16(0x525, arr + n);
1731 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1732 put_unaligned_be16(0x624, arr + n);
1735 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1736 ret = fill_from_dev_buffer(scp, arr,
1737 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1742 /* See resp_iec_m_pg() for how this data is manipulated */
1743 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1746 static int resp_requests(struct scsi_cmnd *scp,
1747 struct sdebug_dev_info *devip)
1749 unsigned char *cmd = scp->cmnd;
1750 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1751 bool dsense = !!(cmd[1] & 1);
1752 u32 alloc_len = cmd[4];
1754 int stopped_state = atomic_read(&devip->stopped);
1756 memset(arr, 0, sizeof(arr));
1757 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1761 arr[2] = LOGICAL_UNIT_NOT_READY;
1762 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1766 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1767 arr[7] = 0xa; /* 18 byte sense buffer */
1768 arr[12] = LOGICAL_UNIT_NOT_READY;
1769 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1771 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1772 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1775 arr[1] = 0x0; /* NO_SENSE in sense_key */
1776 arr[2] = THRESHOLD_EXCEEDED;
1777 arr[3] = 0xff; /* Failure prediction(false) */
1781 arr[2] = 0x0; /* NO_SENSE in sense_key */
1782 arr[7] = 0xa; /* 18 byte sense buffer */
1783 arr[12] = THRESHOLD_EXCEEDED;
1784 arr[13] = 0xff; /* Failure prediction(false) */
1786 } else { /* nothing to report */
1789 memset(arr, 0, len);
1792 memset(arr, 0, len);
1797 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1800 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1802 unsigned char *cmd = scp->cmnd;
1803 int power_cond, want_stop, stopped_state;
1806 power_cond = (cmd[4] & 0xf0) >> 4;
1808 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1809 return check_condition_result;
1811 want_stop = !(cmd[4] & 1);
1812 stopped_state = atomic_read(&devip->stopped);
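/*
 * As used here, devip->stopped holds: 0 = started/ready, 1 = stopped by
 * an earlier START STOP UNIT, 2 = still within the initial "becoming
 * ready" window controlled by sdeb_tur_ms_to_ready.
 */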
1813 if (stopped_state == 2) {
1814 ktime_t now_ts = ktime_get_boottime();
1816 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1817 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1819 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1820 /* tur_ms_to_ready timer expired */
1821 atomic_set(&devip->stopped, 0);
1825 if (stopped_state == 2) {
1827 stopped_state = 1; /* dummy up success */
1828 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1829 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1830 return check_condition_result;
1834 changing = (stopped_state != want_stop);
1836 atomic_xchg(&devip->stopped, want_stop);
1837 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1838 return SDEG_RES_IMMED_MASK;
1843 static sector_t get_sdebug_capacity(void)
1845 static const unsigned int gibibyte = 1073741824;
1847 if (sdebug_virtual_gb > 0)
1848 return (sector_t)sdebug_virtual_gb *
1849 (gibibyte / sdebug_sector_size);
1851 return sdebug_store_sectors;
1854 #define SDEBUG_READCAP_ARR_SZ 8
1855 static int resp_readcap(struct scsi_cmnd *scp,
1856 struct sdebug_dev_info *devip)
1858 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1861 /* following just in case virtual_gb changed */
1862 sdebug_capacity = get_sdebug_capacity();
1863 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1864 if (sdebug_capacity < 0xffffffff) {
1865 capac = (unsigned int)sdebug_capacity - 1;
1866 put_unaligned_be32(capac, arr + 0);
1868 put_unaligned_be32(0xffffffff, arr + 0);
1869 put_unaligned_be16(sdebug_sector_size, arr + 6);
1870 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1873 #define SDEBUG_READCAP16_ARR_SZ 32
1874 static int resp_readcap16(struct scsi_cmnd *scp,
1875 struct sdebug_dev_info *devip)
1877 unsigned char *cmd = scp->cmnd;
1878 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1881 alloc_len = get_unaligned_be32(cmd + 10);
1882 /* following just in case virtual_gb changed */
1883 sdebug_capacity = get_sdebug_capacity();
1884 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1885 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1886 put_unaligned_be32(sdebug_sector_size, arr + 8);
1887 arr[13] = sdebug_physblk_exp & 0xf;
1888 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
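/*
 * LOWEST ALIGNED LOGICAL BLOCK ADDRESS is a 14 bit field: its upper 6
 * bits go into byte 14 (masked above) and its lower 8 bits into byte 15
 * (set below as arr[15]).
 */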
1890 if (scsi_debug_lbp()) {
1891 arr[14] |= 0x80; /* LBPME */
1892 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1893 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1894 * in the wider field maps to 0 in this field.
1896 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1901 * Since the scsi_debug READ CAPACITY implementation always reports the
1902 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1904 if (devip->zmodel == BLK_ZONED_HM)
1907 arr[15] = sdebug_lowest_aligned & 0xff;
1909 if (have_dif_prot) {
1910 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1911 arr[12] |= 1; /* PROT_EN */
1914 return fill_from_dev_buffer(scp, arr,
1915 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1918 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1920 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1921 struct sdebug_dev_info *devip)
1923 unsigned char *cmd = scp->cmnd;
1925 int host_no = devip->sdbg_host->shost->host_no;
1926 int port_group_a, port_group_b, port_a, port_b;
1930 alen = get_unaligned_be32(cmd + 6);
1931 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1933 return DID_REQUEUE << 16;
1935 * EVPD page 0x88 states we have two ports, one
1936 * real and a fake port with no device connected.
1937 * So we create two port groups with one port each
1938 * and set the group with port B to unavailable.
1940 port_a = 0x1; /* relative port A */
1941 port_b = 0x2; /* relative port B */
1942 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1943 (devip->channel & 0x7f);
1944 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1945 (devip->channel & 0x7f) + 0x80;
1948 * The asymmetric access state is cycled according to the host_id.
1951 if (sdebug_vpd_use_hostno == 0) {
1952 arr[n++] = host_no % 3; /* Asymm access state */
1953 arr[n++] = 0x0F; /* claim: all states are supported */
1955 arr[n++] = 0x0; /* Active/Optimized path */
1956 arr[n++] = 0x01; /* only support active/optimized paths */
1958 put_unaligned_be16(port_group_a, arr + n);
1960 arr[n++] = 0; /* Reserved */
1961 arr[n++] = 0; /* Status code */
1962 arr[n++] = 0; /* Vendor unique */
1963 arr[n++] = 0x1; /* One port per group */
1964 arr[n++] = 0; /* Reserved */
1965 arr[n++] = 0; /* Reserved */
1966 put_unaligned_be16(port_a, arr + n);
1968 arr[n++] = 3; /* Port unavailable */
1969 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1970 put_unaligned_be16(port_group_b, arr + n);
1972 arr[n++] = 0; /* Reserved */
1973 arr[n++] = 0; /* Status code */
1974 arr[n++] = 0; /* Vendor unique */
1975 arr[n++] = 0x1; /* One port per group */
1976 arr[n++] = 0; /* Reserved */
1977 arr[n++] = 0; /* Reserved */
1978 put_unaligned_be16(port_b, arr + n);
1982 put_unaligned_be32(rlen, arr + 0);
1985 * Return the smallest value of either
1986 * - The allocated length
1987 * - The constructed command length
1988 * - The maximum array size
1990 rlen = min(alen, n);
1991 ret = fill_from_dev_buffer(scp, arr,
1992 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1997 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1998 struct sdebug_dev_info *devip)
2001 u8 reporting_opts, req_opcode, sdeb_i, supp;
2003 u32 alloc_len, a_len;
2004 int k, offset, len, errsts, count, bump, na;
2005 const struct opcode_info_t *oip;
2006 const struct opcode_info_t *r_oip;
2008 u8 *cmd = scp->cmnd;
2010 rctd = !!(cmd[2] & 0x80);
2011 reporting_opts = cmd[2] & 0x7;
2012 req_opcode = cmd[3];
2013 req_sa = get_unaligned_be16(cmd + 4);
2014 alloc_len = get_unaligned_be32(cmd + 6);
2015 if (alloc_len < 4 || alloc_len > 0xffff) {
2016 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2017 return check_condition_result;
2019 if (alloc_len > 8192)
2023 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2025 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2027 return check_condition_result;
2029 switch (reporting_opts) {
2030 case 0: /* all commands */
2031 /* count number of commands */
2032 for (count = 0, oip = opcode_info_arr;
2033 oip->num_attached != 0xff; ++oip) {
2034 if (F_INV_OP & oip->flags)
2036 count += (oip->num_attached + 1);
2038 bump = rctd ? 20 : 8;
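/*
 * Each command descriptor is 8 bytes; with RCTD set a 12 byte command
 * timeouts descriptor is appended to each, giving 20 bytes per entry.
 */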
2039 put_unaligned_be32(count * bump, arr);
2040 for (offset = 4, oip = opcode_info_arr;
2041 oip->num_attached != 0xff && offset < a_len; ++oip) {
2042 if (F_INV_OP & oip->flags)
2044 na = oip->num_attached;
2045 arr[offset] = oip->opcode;
2046 put_unaligned_be16(oip->sa, arr + offset + 2);
2048 arr[offset + 5] |= 0x2;
2049 if (FF_SA & oip->flags)
2050 arr[offset + 5] |= 0x1;
2051 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2053 put_unaligned_be16(0xa, arr + offset + 8);
2055 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2056 if (F_INV_OP & oip->flags)
2059 arr[offset] = oip->opcode;
2060 put_unaligned_be16(oip->sa, arr + offset + 2);
2062 arr[offset + 5] |= 0x2;
2063 if (FF_SA & oip->flags)
2064 arr[offset + 5] |= 0x1;
2065 put_unaligned_be16(oip->len_mask[0],
2068 put_unaligned_be16(0xa,
2075 case 1: /* one command: opcode only */
2076 case 2: /* one command: opcode plus service action */
2077 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2078 sdeb_i = opcode_ind_arr[req_opcode];
2079 oip = &opcode_info_arr[sdeb_i];
2080 if (F_INV_OP & oip->flags) {
2084 if (1 == reporting_opts) {
2085 if (FF_SA & oip->flags) {
2086 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2089 return check_condition_result;
2092 } else if (2 == reporting_opts &&
2093 0 == (FF_SA & oip->flags)) {
2094 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2095 kfree(arr);
2096 return check_condition_result;
2098 if (0 == (FF_SA & oip->flags) &&
2099 req_opcode == oip->opcode)
2101 else if (0 == (FF_SA & oip->flags)) {
2102 na = oip->num_attached;
2103 for (k = 0, oip = oip->arrp; k < na;
2105 if (req_opcode == oip->opcode)
2108 supp = (k >= na) ? 1 : 3;
2109 } else if (req_sa != oip->sa) {
2110 na = oip->num_attached;
2111 for (k = 0, oip = oip->arrp; k < na;
2113 if (req_sa == oip->sa)
2116 supp = (k >= na) ? 1 : 3;
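/*
 * SUPPORT field values used for 'supp': 1 = command not supported,
 * 3 = command supported in conformance with a SCSI standard.
 */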
2120 u = oip->len_mask[0];
2121 put_unaligned_be16(u, arr + 2);
2122 arr[4] = oip->opcode;
2123 for (k = 1; k < u; ++k)
2124 arr[4 + k] = (k < 16) ?
2125 oip->len_mask[k] : 0xff;
2130 arr[1] = (rctd ? 0x80 : 0) | supp;
2132 put_unaligned_be16(0xa, arr + offset);
2137 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2139 return check_condition_result;
2141 offset = (offset < a_len) ? offset : a_len;
2142 len = (offset < alloc_len) ? offset : alloc_len;
2143 errsts = fill_from_dev_buffer(scp, arr, len);
2148 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2149 struct sdebug_dev_info *devip)
2154 u8 *cmd = scp->cmnd;
2156 memset(arr, 0, sizeof(arr));
2157 repd = !!(cmd[2] & 0x80);
2158 alloc_len = get_unaligned_be32(cmd + 6);
2159 if (alloc_len < 4) {
2160 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2161 return check_condition_result;
2163 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2164 arr[1] = 0x1; /* ITNRS */
2171 len = (len < alloc_len) ? len : alloc_len;
2172 return fill_from_dev_buffer(scp, arr, len);
2175 /* <<Following mode page info copied from ST318451LW>> */
2177 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2178 { /* Read-Write Error Recovery page for mode_sense */
2179 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2182 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2184 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2185 return sizeof(err_recov_pg);
2188 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2189 { /* Disconnect-Reconnect page for mode_sense */
2190 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2191 0, 0, 0, 0, 0, 0, 0, 0};
2193 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2195 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2196 return sizeof(disconnect_pg);
2199 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2200 { /* Format device page for mode_sense */
2201 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2202 0, 0, 0, 0, 0, 0, 0, 0,
2203 0, 0, 0, 0, 0x40, 0, 0, 0};
2205 memcpy(p, format_pg, sizeof(format_pg));
2206 put_unaligned_be16(sdebug_sectors_per, p + 10);
2207 put_unaligned_be16(sdebug_sector_size, p + 12);
2208 if (sdebug_removable)
2209 p[20] |= 0x20; /* should agree with INQUIRY */
2211 memset(p + 2, 0, sizeof(format_pg) - 2);
2212 return sizeof(format_pg);
2215 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2216 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2219 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2220 { /* Caching page for mode_sense */
2221 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2222 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2223 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2224 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2226 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2227 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2228 memcpy(p, caching_pg, sizeof(caching_pg));
2230 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2231 else if (2 == pcontrol)
2232 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2233 return sizeof(caching_pg);
2236 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2239 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2240 { /* Control mode page for mode_sense */
2241 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2243 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2247 ctrl_m_pg[2] |= 0x4;
2249 ctrl_m_pg[2] &= ~0x4;
2252 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2254 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2256 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2257 else if (2 == pcontrol)
2258 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2259 return sizeof(ctrl_m_pg);
2263 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2264 { /* Informational Exceptions control mode page for mode_sense */
2265 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2267 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2270 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2272 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2273 else if (2 == pcontrol)
2274 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2275 return sizeof(iec_m_pg);
2278 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2279 { /* SAS SSP mode page - short format for mode_sense */
2280 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2281 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2283 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2285 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2286 return sizeof(sas_sf_m_pg);
2290 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2292 { /* SAS phy control and discover mode page for mode_sense */
2293 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2294 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2295 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2296 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2297 0x2, 0, 0, 0, 0, 0, 0, 0,
2298 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2299 0, 0, 0, 0, 0, 0, 0, 0,
2300 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2301 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2302 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2303 0x3, 0, 0, 0, 0, 0, 0, 0,
2304 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2305 0, 0, 0, 0, 0, 0, 0, 0,
2309 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2310 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2311 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2312 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2313 port_a = target_dev_id + 1;
2314 port_b = port_a + 1;
2315 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2316 put_unaligned_be32(port_a, p + 20);
2317 put_unaligned_be32(port_b, p + 48 + 20);
2319 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2320 return sizeof(sas_pcd_m_pg);
2323 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2324 { /* SAS SSP shared protocol specific port mode subpage */
2325 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2326 0, 0, 0, 0, 0, 0, 0, 0,
2329 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2331 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2332 return sizeof(sas_sha_m_pg);
2335 #define SDEBUG_MAX_MSENSE_SZ 256
2337 static int resp_mode_sense(struct scsi_cmnd *scp,
2338 struct sdebug_dev_info *devip)
2340 int pcontrol, pcode, subpcode, bd_len;
2341 unsigned char dev_spec;
2342 u32 alloc_len, offset, len;
2344 int target = scp->device->id;
2346 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2347 unsigned char *cmd = scp->cmnd;
2348 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2350 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2351 pcontrol = (cmd[2] & 0xc0) >> 6;
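/*
 * pcontrol is the PC (page control) field: 0 = current values,
 * 1 = changeable values, 2 = default values, 3 = saved values (rejected
 * below because saving parameters is not supported).
 */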
2352 pcode = cmd[2] & 0x3f;
2354 msense_6 = (MODE_SENSE == cmd[0]);
2355 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2356 is_disk = (sdebug_ptype == TYPE_DISK);
2357 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2358 if ((is_disk || is_zbc) && !dbd)
2359 bd_len = llbaa ? 16 : 8;
2362 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2363 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2364 if (0x3 == pcontrol) { /* Saving values not supported */
2365 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2366 return check_condition_result;
2368 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2369 (devip->target * 1000) - 3;
2370 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2371 if (is_disk || is_zbc) {
2372 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2384 arr[4] = 0x1; /* set LONGLBA bit */
2385 arr[7] = bd_len; /* assume 255 or less */
2389 if ((bd_len > 0) && (!sdebug_capacity))
2390 sdebug_capacity = get_sdebug_capacity();
2393 if (sdebug_capacity > 0xfffffffe)
2394 put_unaligned_be32(0xffffffff, ap + 0);
2396 put_unaligned_be32(sdebug_capacity, ap + 0);
2397 put_unaligned_be16(sdebug_sector_size, ap + 6);
2400 } else if (16 == bd_len) {
2401 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2402 put_unaligned_be32(sdebug_sector_size, ap + 12);
2407 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2408 /* TODO: Control Extension page */
2409 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2410 return check_condition_result;
2415 case 0x1: /* Read-Write error recovery page, direct access */
2416 len = resp_err_recov_pg(ap, pcontrol, target);
2419 case 0x2: /* Disconnect-Reconnect page, all devices */
2420 len = resp_disconnect_pg(ap, pcontrol, target);
2423 case 0x3: /* Format device page, direct access */
2425 len = resp_format_pg(ap, pcontrol, target);
2430 case 0x8: /* Caching page, direct access */
2431 if (is_disk || is_zbc) {
2432 len = resp_caching_pg(ap, pcontrol, target);
2437 case 0xa: /* Control Mode page, all devices */
2438 len = resp_ctrl_m_pg(ap, pcontrol, target);
2441 case 0x19: /* if spc==1 then sas phy, control+discover */
2442 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2443 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2444 return check_condition_result;
2447 if ((0x0 == subpcode) || (0xff == subpcode))
2448 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2449 if ((0x1 == subpcode) || (0xff == subpcode))
2450 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2452 if ((0x2 == subpcode) || (0xff == subpcode))
2453 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2456 case 0x1c: /* Informational Exceptions Mode page, all devices */
2457 len = resp_iec_m_pg(ap, pcontrol, target);
2460 case 0x3f: /* Read all Mode pages */
2461 if ((0 == subpcode) || (0xff == subpcode)) {
2462 len = resp_err_recov_pg(ap, pcontrol, target);
2463 len += resp_disconnect_pg(ap + len, pcontrol, target);
2465 len += resp_format_pg(ap + len, pcontrol,
2467 len += resp_caching_pg(ap + len, pcontrol,
2469 } else if (is_zbc) {
2470 len += resp_caching_pg(ap + len, pcontrol,
2473 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2474 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2475 if (0xff == subpcode) {
2476 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2477 target, target_dev_id);
2478 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2480 len += resp_iec_m_pg(ap + len, pcontrol, target);
2483 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2484 return check_condition_result;
2492 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2493 return check_condition_result;
2496 arr[0] = offset - 1;
2498 put_unaligned_be16((offset - 2), arr + 0);
2499 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2502 #define SDEBUG_MAX_MSELECT_SZ 512
2504 static int resp_mode_select(struct scsi_cmnd *scp,
2505 struct sdebug_dev_info *devip)
2507 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2508 int param_len, res, mpage;
2509 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2510 unsigned char *cmd = scp->cmnd;
2511 int mselect6 = (MODE_SELECT == cmd[0]);
2513 memset(arr, 0, sizeof(arr));
2516 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2517 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2518 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2519 return check_condition_result;
2521 res = fetch_to_dev_buffer(scp, arr, param_len);
2523 return DID_ERROR << 16;
2524 else if (sdebug_verbose && (res < param_len))
2525 sdev_printk(KERN_INFO, scp->device,
2526 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2527 __func__, param_len, res);
2528 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2529 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2530 off = bd_len + (mselect6 ? 4 : 8);
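/*
 * The mode parameter header is 4 bytes for MODE SELECT(6) and 8 bytes
 * for MODE SELECT(10); it is followed by bd_len bytes of block
 * descriptors, so 'off' is the offset of the first mode page.
 */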
2531 if (md_len > 2 || off >= res) {
2532 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2533 return check_condition_result;
2535 mpage = arr[off] & 0x3f;
2536 ps = !!(arr[off] & 0x80);
2538 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2539 return check_condition_result;
2541 spf = !!(arr[off] & 0x40);
2542 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2544 if ((pg_len + off) > param_len) {
2545 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2546 PARAMETER_LIST_LENGTH_ERR, 0);
2547 return check_condition_result;
2550 case 0x8: /* Caching Mode page */
2551 if (caching_pg[1] == arr[off + 1]) {
2552 memcpy(caching_pg + 2, arr + off + 2,
2553 sizeof(caching_pg) - 2);
2554 goto set_mode_changed_ua;
2557 case 0xa: /* Control Mode page */
2558 if (ctrl_m_pg[1] == arr[off + 1]) {
2559 memcpy(ctrl_m_pg + 2, arr + off + 2,
2560 sizeof(ctrl_m_pg) - 2);
2561 if (ctrl_m_pg[4] & 0x8)
2565 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2566 goto set_mode_changed_ua;
2569 case 0x1c: /* Informational Exceptions Mode page */
2570 if (iec_m_pg[1] == arr[off + 1]) {
2571 memcpy(iec_m_pg + 2, arr + off + 2,
2572 sizeof(iec_m_pg) - 2);
2573 goto set_mode_changed_ua;
2579 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2580 return check_condition_result;
2581 set_mode_changed_ua:
2582 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2586 static int resp_temp_l_pg(unsigned char *arr)
2588 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2589 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2592 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2593 return sizeof(temp_l_pg);
2596 static int resp_ie_l_pg(unsigned char *arr)
2598 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2601 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2602 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2603 arr[4] = THRESHOLD_EXCEEDED;
2606 return sizeof(ie_l_pg);
2609 static int resp_env_rep_l_spg(unsigned char *arr)
2611 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2612 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2613 0x1, 0x0, 0x23, 0x8,
2614 0x0, 55, 72, 35, 55, 45, 0, 0,
2617 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2618 return sizeof(env_rep_l_spg);
2621 #define SDEBUG_MAX_LSENSE_SZ 512
2623 static int resp_log_sense(struct scsi_cmnd *scp,
2624 struct sdebug_dev_info *devip)
2626 int ppc, sp, pcode, subpcode;
2627 u32 alloc_len, len, n;
2628 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2629 unsigned char *cmd = scp->cmnd;
2631 memset(arr, 0, sizeof(arr));
2635 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2636 return check_condition_result;
2638 pcode = cmd[2] & 0x3f;
2639 subpcode = cmd[3] & 0xff;
2640 alloc_len = get_unaligned_be16(cmd + 7);
2642 if (0 == subpcode) {
2644 case 0x0: /* Supported log pages log page */
2646 arr[n++] = 0x0; /* this page */
2647 arr[n++] = 0xd; /* Temperature */
2648 arr[n++] = 0x2f; /* Informational exceptions */
2651 case 0xd: /* Temperature log page */
2652 arr[3] = resp_temp_l_pg(arr + 4);
2654 case 0x2f: /* Informational exceptions log page */
2655 arr[3] = resp_ie_l_pg(arr + 4);
2658 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2659 return check_condition_result;
2661 } else if (0xff == subpcode) {
2665 case 0x0: /* Supported log pages and subpages log page */
2668 arr[n++] = 0x0; /* 0,0 page */
2670 arr[n++] = 0xff; /* this page */
2672 arr[n++] = 0x0; /* Temperature */
2674 arr[n++] = 0x1; /* Environment reporting */
2676 arr[n++] = 0xff; /* all 0xd subpages */
2678 arr[n++] = 0x0; /* Informational exceptions */
2680 arr[n++] = 0xff; /* all 0x2f subpages */
2683 case 0xd: /* Temperature subpages */
2686 arr[n++] = 0x0; /* Temperature */
2688 arr[n++] = 0x1; /* Environment reporting */
2690 arr[n++] = 0xff; /* these subpages */
2693 case 0x2f: /* Informational exceptions subpages */
2696 arr[n++] = 0x0; /* Informational exceptions */
2698 arr[n++] = 0xff; /* these subpages */
2702 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2703 return check_condition_result;
2705 } else if (subpcode > 0) {
2708 if (pcode == 0xd && subpcode == 1)
2709 arr[3] = resp_env_rep_l_spg(arr + 4);
2711 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2712 return check_condition_result;
2715 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2716 return check_condition_result;
2718 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2719 return fill_from_dev_buffer(scp, arr,
2720 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2723 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2725 return devip->nr_zones != 0;
2728 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2729 unsigned long long lba)
2731 u32 zno = lba >> devip->zsize_shift;
2732 struct sdeb_zone_state *zsp;
2734 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2735 return &devip->zstate[zno];
2738 * If the zone capacity is less than the zone size, adjust for gap
2741 zno = 2 * zno - devip->nr_conv_zones;
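/*
 * Assumed zstate[] layout when zcap < zsize: the conventional zones come
 * first, then a (sequential, gap) pair for every remaining zsize-sized
 * LBA range. For LBA range number zno the pair starts at
 * nr_conv_zones + 2 * (zno - nr_conv_zones) = 2 * zno - nr_conv_zones;
 * the check below moves on to the gap zone when the LBA lies past the
 * end of the sequential zone.
 */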
2742 WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2743 zsp = &devip->zstate[zno];
2744 if (lba >= zsp->z_start + zsp->z_size)
2746 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2750 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2752 return zsp->z_type == ZBC_ZTYPE_CNV;
2755 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2757 return zsp->z_type == ZBC_ZTYPE_GAP;
2760 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2762 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2765 static void zbc_close_zone(struct sdebug_dev_info *devip,
2766 struct sdeb_zone_state *zsp)
2768 enum sdebug_z_cond zc;
2770 if (!zbc_zone_is_seq(zsp))
2774 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2777 if (zc == ZC2_IMPLICIT_OPEN)
2778 devip->nr_imp_open--;
2780 devip->nr_exp_open--;
2782 if (zsp->z_wp == zsp->z_start) {
2783 zsp->z_cond = ZC1_EMPTY;
2785 zsp->z_cond = ZC4_CLOSED;
2790 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2792 struct sdeb_zone_state *zsp = &devip->zstate[0];
2795 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2796 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2797 zbc_close_zone(devip, zsp);
2803 static void zbc_open_zone(struct sdebug_dev_info *devip,
2804 struct sdeb_zone_state *zsp, bool explicit)
2806 enum sdebug_z_cond zc;
2808 if (!zbc_zone_is_seq(zsp))
2812 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2813 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2816 /* Close an implicit open zone if necessary */
2817 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2818 zbc_close_zone(devip, zsp);
2819 else if (devip->max_open &&
2820 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2821 zbc_close_imp_open_zone(devip);
2823 if (zsp->z_cond == ZC4_CLOSED)
2826 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2827 devip->nr_exp_open++;
2829 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2830 devip->nr_imp_open++;
2834 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2835 struct sdeb_zone_state *zsp)
2837 switch (zsp->z_cond) {
2838 case ZC2_IMPLICIT_OPEN:
2839 devip->nr_imp_open--;
2841 case ZC3_EXPLICIT_OPEN:
2842 devip->nr_exp_open--;
2845 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2846 zsp->z_start, zsp->z_cond);
2849 zsp->z_cond = ZC5_FULL;
2852 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2853 unsigned long long lba, unsigned int num)
2855 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2856 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2858 if (!zbc_zone_is_seq(zsp))
2861 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2863 if (zsp->z_wp >= zend)
2864 zbc_set_zone_full(devip, zsp);
2869 if (lba != zsp->z_wp)
2870 zsp->z_non_seq_resource = true;
2876 } else if (end > zsp->z_wp) {
2882 if (zsp->z_wp >= zend)
2883 zbc_set_zone_full(devip, zsp);
2889 zend = zsp->z_start + zsp->z_size;
2894 static int check_zbc_access_params(struct scsi_cmnd *scp,
2895 unsigned long long lba, unsigned int num, bool write)
2897 struct scsi_device *sdp = scp->device;
2898 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2899 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2900 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2903 if (devip->zmodel == BLK_ZONED_HA)
2905 /* For host-managed, reads cannot cross zone type boundaries */
2906 if (zsp->z_type != zsp_end->z_type) {
2907 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2910 return check_condition_result;
2915 /* Writing into a gap zone is not allowed */
2916 if (zbc_zone_is_gap(zsp)) {
2917 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2918 ATTEMPT_ACCESS_GAP);
2919 return check_condition_result;
2922 /* No restrictions for writes within conventional zones */
2923 if (zbc_zone_is_conv(zsp)) {
2924 if (!zbc_zone_is_conv(zsp_end)) {
2925 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2927 WRITE_BOUNDARY_ASCQ);
2928 return check_condition_result;
2933 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2934 /* Writes cannot cross sequential zone boundaries */
2935 if (zsp_end != zsp) {
2936 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2938 WRITE_BOUNDARY_ASCQ);
2939 return check_condition_result;
2941 /* Cannot write full zones */
2942 if (zsp->z_cond == ZC5_FULL) {
2943 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2944 INVALID_FIELD_IN_CDB, 0);
2945 return check_condition_result;
2947 /* Writes must be aligned to the zone WP */
2948 if (lba != zsp->z_wp) {
2949 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2951 UNALIGNED_WRITE_ASCQ);
2952 return check_condition_result;
2956 /* Handle implicit open of closed and empty zones */
2957 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2958 if (devip->max_open &&
2959 devip->nr_exp_open >= devip->max_open) {
2960 mk_sense_buffer(scp, DATA_PROTECT,
2963 return check_condition_result;
2965 zbc_open_zone(devip, zsp, false);
2971 static inline int check_device_access_params
2972 (struct scsi_cmnd *scp, unsigned long long lba,
2973 unsigned int num, bool write)
2975 struct scsi_device *sdp = scp->device;
2976 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2978 if (lba + num > sdebug_capacity) {
2979 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2980 return check_condition_result;
2982 /* transfer length excessive (tie in to block limits VPD page) */
2983 if (num > sdebug_store_sectors) {
2984 /* needs work to find which cdb byte 'num' comes from */
2985 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2986 return check_condition_result;
2988 if (write && unlikely(sdebug_wp)) {
2989 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2990 return check_condition_result;
2992 if (sdebug_dev_is_zoned(devip))
2993 return check_zbc_access_params(scp, lba, num, write);
2999 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3000 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3001 * that access any of the "stores" in struct sdeb_store_info should call this
3002 * function with bug_if_fake_rw set to true.
3004 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3005 bool bug_if_fake_rw)
3007 if (sdebug_fake_rw) {
3008 BUG_ON(bug_if_fake_rw); /* See note above */
3011 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3014 /* Returns number of bytes copied or -1 if error. */
3015 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3016 u32 sg_skip, u64 lba, u32 num, bool do_write)
3019 u64 block, rest = 0;
3020 enum dma_data_direction dir;
3021 struct scsi_data_buffer *sdb = &scp->sdb;
3025 dir = DMA_TO_DEVICE;
3026 write_since_sync = true;
3028 dir = DMA_FROM_DEVICE;
3031 if (!sdb->length || !sip)
3033 if (scp->sc_data_direction != dir)
3037 block = do_div(lba, sdebug_store_sectors);
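/*
 * The backing store is treated as circular: 'block' is lba's offset
 * within the store and 'rest' is the part of the transfer that runs past
 * sdebug_store_sectors and wraps to the start of the store, hence the
 * second sg_copy_buffer() call below.
 */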
3038 if (block + num > sdebug_store_sectors)
3039 rest = block + num - sdebug_store_sectors;
3041 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3042 fsp + (block * sdebug_sector_size),
3043 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3044 if (ret != (num - rest) * sdebug_sector_size)
3048 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3049 fsp, rest * sdebug_sector_size,
3050 sg_skip + ((num - rest) * sdebug_sector_size),
3057 /* Returns number of bytes copied or -1 if error. */
3058 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3060 struct scsi_data_buffer *sdb = &scp->sdb;
3064 if (scp->sc_data_direction != DMA_TO_DEVICE)
3066 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3067 num * sdebug_sector_size, 0, true);
3070 /* If sip->storep+lba compares equal to arr (for num blocks), then copy the top half of
3071 * arr into sip->storep+lba and return true. If the comparison fails then return false. */
3073 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3074 const u8 *arr, bool compare_only)
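/*
 * 'arr' is expected to hold 2 * num logical blocks: the lower half is
 * compared against the store and, on a match (and unless compare_only is
 * set), the upper half is written back to the store.
 */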
3077 u64 block, rest = 0;
3078 u32 store_blks = sdebug_store_sectors;
3079 u32 lb_size = sdebug_sector_size;
3080 u8 *fsp = sip->storep;
3082 block = do_div(lba, store_blks);
3083 if (block + num > store_blks)
3084 rest = block + num - store_blks;
3086 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3090 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3096 arr += num * lb_size;
3097 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3099 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3103 static __be16 dif_compute_csum(const void *buf, int len)
3108 csum = (__force __be16)ip_compute_csum(buf, len);
3110 csum = cpu_to_be16(crc_t10dif(buf, len));
3115 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3116 sector_t sector, u32 ei_lba)
3118 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3120 if (sdt->guard_tag != csum) {
3121 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3122 (unsigned long)sector,
3123 be16_to_cpu(sdt->guard_tag),
3127 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3128 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3129 pr_err("REF check failed on sector %lu\n",
3130 (unsigned long)sector);
3133 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3134 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3135 pr_err("REF check failed on sector %lu\n",
3136 (unsigned long)sector);
3142 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3143 unsigned int sectors, bool read)
3147 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3148 scp->device->hostdata, true);
3149 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3150 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3151 struct sg_mapping_iter miter;
3153 /* Bytes of protection data to copy into sgl */
3154 resid = sectors * sizeof(*dif_storep);
3156 sg_miter_start(&miter, scsi_prot_sglist(scp),
3157 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3158 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3160 while (sg_miter_next(&miter) && resid > 0) {
3161 size_t len = min_t(size_t, miter.length, resid);
3162 void *start = dif_store(sip, sector);
3165 if (dif_store_end < start + len)
3166 rest = start + len - dif_store_end;
3171 memcpy(paddr, start, len - rest);
3173 memcpy(start, paddr, len - rest);
3177 memcpy(paddr + len - rest, dif_storep, rest);
3179 memcpy(dif_storep, paddr + len - rest, rest);
3182 sector += len / sizeof(*dif_storep);
3185 sg_miter_stop(&miter);
3188 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3189 unsigned int sectors, u32 ei_lba)
3194 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3195 scp->device->hostdata, true);
3196 struct t10_pi_tuple *sdt;
3198 for (i = 0; i < sectors; i++, ei_lba++) {
3199 sector = start_sec + i;
3200 sdt = dif_store(sip, sector);
3202 if (sdt->app_tag == cpu_to_be16(0xffff))
3206 * Because scsi_debug acts as both initiator and
3207 * target we proceed to verify the PI even if
3208 * RDPROTECT=3. This is done so the "initiator" knows
3209 * which type of error to return. Otherwise we would
3210 * have to iterate over the PI twice.
3212 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3213 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3222 dif_copy_prot(scp, start_sec, sectors, true);
3229 sdeb_read_lock(struct sdeb_store_info *sip)
3231 if (sdebug_no_rwlock) {
3233 __acquire(&sip->macc_lck);
3235 __acquire(&sdeb_fake_rw_lck);
3238 read_lock(&sip->macc_lck);
3240 read_lock(&sdeb_fake_rw_lck);
3245 sdeb_read_unlock(struct sdeb_store_info *sip)
3247 if (sdebug_no_rwlock) {
3249 __release(&sip->macc_lck);
3251 __release(&sdeb_fake_rw_lck);
3254 read_unlock(&sip->macc_lck);
3256 read_unlock(&sdeb_fake_rw_lck);
3261 sdeb_write_lock(struct sdeb_store_info *sip)
3263 if (sdebug_no_rwlock) {
3265 __acquire(&sip->macc_lck);
3267 __acquire(&sdeb_fake_rw_lck);
3270 write_lock(&sip->macc_lck);
3272 write_lock(&sdeb_fake_rw_lck);
3277 sdeb_write_unlock(struct sdeb_store_info *sip)
3279 if (sdebug_no_rwlock) {
3281 __release(&sip->macc_lck);
3283 __release(&sdeb_fake_rw_lck);
3286 write_unlock(&sip->macc_lck);
3288 write_unlock(&sdeb_fake_rw_lck);
3292 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3299 struct sdeb_store_info *sip = devip2sip(devip, true);
3300 u8 *cmd = scp->cmnd;
3305 lba = get_unaligned_be64(cmd + 2);
3306 num = get_unaligned_be32(cmd + 10);
3311 lba = get_unaligned_be32(cmd + 2);
3312 num = get_unaligned_be16(cmd + 7);
3317 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3318 (u32)(cmd[1] & 0x1f) << 16;
3319 num = (0 == cmd[4]) ? 256 : cmd[4];
3324 lba = get_unaligned_be32(cmd + 2);
3325 num = get_unaligned_be32(cmd + 6);
3328 case XDWRITEREAD_10:
3330 lba = get_unaligned_be32(cmd + 2);
3331 num = get_unaligned_be16(cmd + 7);
3334 default: /* assume READ(32) */
3335 lba = get_unaligned_be64(cmd + 12);
3336 ei_lba = get_unaligned_be32(cmd + 20);
3337 num = get_unaligned_be32(cmd + 28);
3341 if (unlikely(have_dif_prot && check_prot)) {
3342 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3344 mk_sense_invalid_opcode(scp);
3345 return check_condition_result;
3347 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3348 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3349 (cmd[1] & 0xe0) == 0)
3350 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3353 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3354 atomic_read(&sdeb_inject_pending))) {
3356 atomic_set(&sdeb_inject_pending, 0);
3359 ret = check_device_access_params(scp, lba, num, false);
3362 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3363 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3364 ((lba + num) > sdebug_medium_error_start))) {
3365 /* claim unrecoverable read error */
3366 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3367 /* set info field and valid bit for fixed format sense */
3368 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3369 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3370 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3371 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3372 put_unaligned_be32(ret, scp->sense_buffer + 3);
3374 scsi_set_resid(scp, scsi_bufflen(scp));
3375 return check_condition_result;
3378 sdeb_read_lock(sip);
3381 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3382 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3383 case 1: /* Guard tag error */
3384 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3385 sdeb_read_unlock(sip);
3386 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3387 return check_condition_result;
3388 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3389 sdeb_read_unlock(sip);
3390 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3391 return illegal_condition_result;
3394 case 3: /* Reference tag error */
3395 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3396 sdeb_read_unlock(sip);
3397 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3398 return check_condition_result;
3399 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3400 sdeb_read_unlock(sip);
3401 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3402 return illegal_condition_result;
3408 ret = do_device_access(sip, scp, 0, lba, num, false);
3409 sdeb_read_unlock(sip);
3410 if (unlikely(ret == -1))
3411 return DID_ERROR << 16;
3413 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3415 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3416 atomic_read(&sdeb_inject_pending))) {
3417 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3418 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3419 atomic_set(&sdeb_inject_pending, 0);
3420 return check_condition_result;
3421 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3422 /* Logical block guard check failed */
3423 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3424 atomic_set(&sdeb_inject_pending, 0);
3425 return illegal_condition_result;
3426 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3427 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3428 atomic_set(&sdeb_inject_pending, 0);
3429 return illegal_condition_result;
3435 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3436 unsigned int sectors, u32 ei_lba)
3439 struct t10_pi_tuple *sdt;
3441 sector_t sector = start_sec;
3444 struct sg_mapping_iter diter;
3445 struct sg_mapping_iter piter;
3447 BUG_ON(scsi_sg_count(SCpnt) == 0);
3448 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3450 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3451 scsi_prot_sg_count(SCpnt),
3452 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3453 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3454 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3456 /* For each protection page */
3457 while (sg_miter_next(&piter)) {
3459 if (WARN_ON(!sg_miter_next(&diter))) {
3464 for (ppage_offset = 0; ppage_offset < piter.length;
3465 ppage_offset += sizeof(struct t10_pi_tuple)) {
3466 /* If we're at the end of the current
3467 * data page advance to the next one
3469 if (dpage_offset >= diter.length) {
3470 if (WARN_ON(!sg_miter_next(&diter))) {
3477 sdt = piter.addr + ppage_offset;
3478 daddr = diter.addr + dpage_offset;
3480 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3481 ret = dif_verify(sdt, daddr, sector, ei_lba);
3488 dpage_offset += sdebug_sector_size;
3490 diter.consumed = dpage_offset;
3491 sg_miter_stop(&diter);
3493 sg_miter_stop(&piter);
3495 dif_copy_prot(SCpnt, start_sec, sectors, false);
3502 sg_miter_stop(&diter);
3503 sg_miter_stop(&piter);
3507 static unsigned long lba_to_map_index(sector_t lba)
3509 if (sdebug_unmap_alignment)
3510 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3511 sector_div(lba, sdebug_unmap_granularity);
3515 static sector_t map_index_to_lba(unsigned long index)
3517 sector_t lba = index * sdebug_unmap_granularity;
3519 if (sdebug_unmap_alignment)
3520 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
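/*
 * The two helpers above translate LBAs to provisioning-map bit indexes
 * and back. Worked example (illustrative values): with
 * sdebug_unmap_granularity=4 and sdebug_unmap_alignment=1, index 0
 * covers LBA 0 only, index 1 covers LBAs 1-4, index 2 covers LBAs 5-8;
 * map_index_to_lba(1) returns 1 and map_index_to_lba(2) returns 5.
 */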
3524 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3528 unsigned int mapped;
3529 unsigned long index;
3532 index = lba_to_map_index(lba);
3533 mapped = test_bit(index, sip->map_storep);
3536 next = find_next_zero_bit(sip->map_storep, map_size, index);
3538 next = find_next_bit(sip->map_storep, map_size, index);
3540 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3545 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3548 sector_t end = lba + len;
3551 unsigned long index = lba_to_map_index(lba);
3553 if (index < map_size)
3554 set_bit(index, sip->map_storep);
3556 lba = map_index_to_lba(index + 1);
3560 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3563 sector_t end = lba + len;
3564 u8 *fsp = sip->storep;
3567 unsigned long index = lba_to_map_index(lba);
3569 if (lba == map_index_to_lba(index) &&
3570 lba + sdebug_unmap_granularity <= end &&
3572 clear_bit(index, sip->map_storep);
3573 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3574 memset(fsp + lba * sdebug_sector_size,
3575 (sdebug_lbprz & 1) ? 0 : 0xff,
3576 sdebug_sector_size *
3577 sdebug_unmap_granularity);
3579 if (sip->dif_storep) {
3580 memset(sip->dif_storep + lba, 0xff,
3581 sizeof(*sip->dif_storep) *
3582 sdebug_unmap_granularity);
3585 lba = map_index_to_lba(index + 1);
3589 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3596 struct sdeb_store_info *sip = devip2sip(devip, true);
3597 u8 *cmd = scp->cmnd;
3602 lba = get_unaligned_be64(cmd + 2);
3603 num = get_unaligned_be32(cmd + 10);
3608 lba = get_unaligned_be32(cmd + 2);
3609 num = get_unaligned_be16(cmd + 7);
3614 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3615 (u32)(cmd[1] & 0x1f) << 16;
3616 num = (0 == cmd[4]) ? 256 : cmd[4];
3621 lba = get_unaligned_be32(cmd + 2);
3622 num = get_unaligned_be32(cmd + 6);
3625 case 0x53: /* XDWRITEREAD(10) */
3627 lba = get_unaligned_be32(cmd + 2);
3628 num = get_unaligned_be16(cmd + 7);
3631 default: /* assume WRITE(32) */
3632 lba = get_unaligned_be64(cmd + 12);
3633 ei_lba = get_unaligned_be32(cmd + 20);
3634 num = get_unaligned_be32(cmd + 28);
3638 if (unlikely(have_dif_prot && check_prot)) {
3639 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3641 mk_sense_invalid_opcode(scp);
3642 return check_condition_result;
3644 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3645 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3646 (cmd[1] & 0xe0) == 0)
3647 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3651 sdeb_write_lock(sip);
3652 ret = check_device_access_params(scp, lba, num, true);
3654 sdeb_write_unlock(sip);
3659 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3660 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3661 case 1: /* Guard tag error */
3662 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3663 sdeb_write_unlock(sip);
3664 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3665 return illegal_condition_result;
3666 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3667 sdeb_write_unlock(sip);
3668 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3669 return check_condition_result;
3672 case 3: /* Reference tag error */
3673 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3674 sdeb_write_unlock(sip);
3675 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3676 return illegal_condition_result;
3677 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3678 sdeb_write_unlock(sip);
3679 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3680 return check_condition_result;
3686 ret = do_device_access(sip, scp, 0, lba, num, true);
3687 if (unlikely(scsi_debug_lbp()))
3688 map_region(sip, lba, num);
3689 /* If ZBC zone then bump its write pointer */
3690 if (sdebug_dev_is_zoned(devip))
3691 zbc_inc_wp(devip, lba, num);
3692 sdeb_write_unlock(sip);
3693 if (unlikely(-1 == ret))
3694 return DID_ERROR << 16;
3695 else if (unlikely(sdebug_verbose &&
3696 (ret < (num * sdebug_sector_size))))
3697 sdev_printk(KERN_INFO, scp->device,
3698 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3699 my_name, num * sdebug_sector_size, ret);
3701 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3702 atomic_read(&sdeb_inject_pending))) {
3703 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3704 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3705 atomic_set(&sdeb_inject_pending, 0);
3706 return check_condition_result;
3707 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3708 /* Logical block guard check failed */
3709 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3710 atomic_set(&sdeb_inject_pending, 0);
3711 return illegal_condition_result;
3712 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3713 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3714 atomic_set(&sdeb_inject_pending, 0);
3715 return illegal_condition_result;
3722 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3723 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3725 static int resp_write_scat(struct scsi_cmnd *scp,
3726 struct sdebug_dev_info *devip)
3728 u8 *cmd = scp->cmnd;
3731 struct sdeb_store_info *sip = devip2sip(devip, true);
3733 u16 lbdof, num_lrd, k;
3734 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3735 u32 lb_size = sdebug_sector_size;
3740 static const u32 lrd_size = 32; /* + parameter list header size */
3742 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3744 wrprotect = (cmd[10] >> 5) & 0x7;
3745 lbdof = get_unaligned_be16(cmd + 12);
3746 num_lrd = get_unaligned_be16(cmd + 16);
3747 bt_len = get_unaligned_be32(cmd + 28);
3748 } else { /* that leaves WRITE SCATTERED(16) */
3750 wrprotect = (cmd[2] >> 5) & 0x7;
3751 lbdof = get_unaligned_be16(cmd + 4);
3752 num_lrd = get_unaligned_be16(cmd + 8);
3753 bt_len = get_unaligned_be32(cmd + 10);
3754 if (unlikely(have_dif_prot)) {
3755 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3757 mk_sense_invalid_opcode(scp);
3758 return illegal_condition_result;
3760 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3761 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3763 sdev_printk(KERN_ERR, scp->device,
3764 "Unprotected WR to DIF device\n");
3767 if ((num_lrd == 0) || (bt_len == 0))
3768 return 0; /* T10 says these do-nothings are not errors */
3771 sdev_printk(KERN_INFO, scp->device,
3772 "%s: %s: LB Data Offset field bad\n",
3774 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3775 return illegal_condition_result;
3777 lbdof_blen = lbdof * lb_size;
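/*
 * Data-out layout (see SBC-4 WRITE SCATTERED): lbdof is expressed in
 * logical blocks, so the first lbdof_blen bytes hold the 32 byte
 * parameter list header plus the 32 byte LBA range descriptors, and the
 * write data follows from offset lbdof_blen (tracked below as sg_off).
 */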
3778 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3780 sdev_printk(KERN_INFO, scp->device,
3781 "%s: %s: LBA range descriptors don't fit\n",
3783 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3784 return illegal_condition_result;
3786 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3788 return SCSI_MLQUEUE_HOST_BUSY;
3790 sdev_printk(KERN_INFO, scp->device,
3791 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3792 my_name, __func__, lbdof_blen);
3793 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3795 ret = DID_ERROR << 16;
3799 sdeb_write_lock(sip);
3800 sg_off = lbdof_blen;
3801 /* Spec says Buffer xfer Length field in number of LBs in dout */
3803 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3804 lba = get_unaligned_be64(up + 0);
3805 num = get_unaligned_be32(up + 8);
3807 sdev_printk(KERN_INFO, scp->device,
3808 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3809 my_name, __func__, k, lba, num, sg_off);
3812 ret = check_device_access_params(scp, lba, num, true);
3814 goto err_out_unlock;
3815 num_by = num * lb_size;
3816 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3818 if ((cum_lb + num) > bt_len) {
3820 sdev_printk(KERN_INFO, scp->device,
3821 "%s: %s: sum of blocks > data provided\n",
3823 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3825 ret = illegal_condition_result;
3826 goto err_out_unlock;
3830 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3831 int prot_ret = prot_verify_write(scp, lba, num,
3835 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3837 ret = illegal_condition_result;
3838 goto err_out_unlock;
3842 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3843 /* If ZBC zone then bump its write pointer */
3844 if (sdebug_dev_is_zoned(devip))
3845 zbc_inc_wp(devip, lba, num);
3846 if (unlikely(scsi_debug_lbp()))
3847 map_region(sip, lba, num);
3848 if (unlikely(-1 == ret)) {
3849 ret = DID_ERROR << 16;
3850 goto err_out_unlock;
3851 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3852 sdev_printk(KERN_INFO, scp->device,
3853 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3854 my_name, num_by, ret);
3856 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3857 atomic_read(&sdeb_inject_pending))) {
3858 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3859 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3860 atomic_set(&sdeb_inject_pending, 0);
3861 ret = check_condition_result;
3862 goto err_out_unlock;
3863 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3864 /* Logical block guard check failed */
3865 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3866 atomic_set(&sdeb_inject_pending, 0);
3867 ret = illegal_condition_result;
3868 goto err_out_unlock;
3869 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3870 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3871 atomic_set(&sdeb_inject_pending, 0);
3872 ret = illegal_condition_result;
3873 goto err_out_unlock;
3881 sdeb_write_unlock(sip);
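/*
 * WRITE SAME: write one logical block of data (or zeroes when the NDOB bit
 * is set) to "num" consecutive blocks starting at "lba". When the UNMAP bit
 * is set and logical block provisioning is enabled, the range is unmapped
 * instead of written.
 */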
3887 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3888 u32 ei_lba, bool unmap, bool ndob)
3890 struct scsi_device *sdp = scp->device;
3891 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3892 unsigned long long i;
3894 u32 lb_size = sdebug_sector_size;
3896 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3897 scp->device->hostdata, true);
3901 sdeb_write_lock(sip);
3903 ret = check_device_access_params(scp, lba, num, true);
3905 sdeb_write_unlock(sip);
3909 if (unmap && scsi_debug_lbp()) {
3910 unmap_region(sip, lba, num);
3914 block = do_div(lbaa, sdebug_store_sectors);
3915 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3917 fs1p = fsp + (block * lb_size);
3919 memset(fs1p, 0, lb_size);
3922 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3925 sdeb_write_unlock(sip);
3926 return DID_ERROR << 16;
3927 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3928 sdev_printk(KERN_INFO, scp->device,
3929 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3930 my_name, "write same", lb_size, ret);
3932 /* Copy first sector to remaining blocks */
3933 for (i = 1 ; i < num ; i++) {
3935 block = do_div(lbaa, sdebug_store_sectors);
3936 memmove(fsp + (block * lb_size), fs1p, lb_size);
3938 if (scsi_debug_lbp())
3939 map_region(sip, lba, num);
3940 /* If ZBC zone then bump its write pointer */
3941 if (sdebug_dev_is_zoned(devip))
3942 zbc_inc_wp(devip, lba, num);
3944 sdeb_write_unlock(sip);
3949 static int resp_write_same_10(struct scsi_cmnd *scp,
3950 struct sdebug_dev_info *devip)
3952 u8 *cmd = scp->cmnd;
3959 if (sdebug_lbpws10 == 0) {
3960 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3961 return check_condition_result;
3965 lba = get_unaligned_be32(cmd + 2);
3966 num = get_unaligned_be16(cmd + 7);
3967 if (num > sdebug_write_same_length) {
3968 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3969 return check_condition_result;
3971 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3974 static int resp_write_same_16(struct scsi_cmnd *scp,
3975 struct sdebug_dev_info *devip)
3977 u8 *cmd = scp->cmnd;
3984 if (cmd[1] & 0x8) { /* UNMAP */
3985 if (sdebug_lbpws == 0) {
3986 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3987 return check_condition_result;
3991 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3993 lba = get_unaligned_be64(cmd + 2);
3994 num = get_unaligned_be32(cmd + 10);
3995 if (num > sdebug_write_same_length) {
3996 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3997 return check_condition_result;
3999 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4002 /* Note the mode field is in the same position as the (lower) service action
4003  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4004  * each mode of this command should be reported separately; left for the future. */
4005 static int resp_write_buffer(struct scsi_cmnd *scp,
4006 struct sdebug_dev_info *devip)
4008 u8 *cmd = scp->cmnd;
4009 struct scsi_device *sdp = scp->device;
4010 struct sdebug_dev_info *dp;
4013 mode = cmd[1] & 0x1f;
4015 case 0x4: /* download microcode (MC) and activate (ACT) */
4016 /* set UAs on this device only */
4017 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4018 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4020 case 0x5: /* download MC, save and ACT */
4021 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4023 case 0x6: /* download MC with offsets and ACT */
4024 /* set UAs on most devices (LUs) in this target */
4025 list_for_each_entry(dp,
4026 &devip->sdbg_host->dev_info_list,
4028 if (dp->target == sdp->id) {
4029 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4031 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4035 case 0x7: /* download MC with offsets, save, and ACT */
4036 /* set UA on all devices (LUs) in this target */
4037 list_for_each_entry(dp,
4038 &devip->sdbg_host->dev_info_list,
4040 if (dp->target == sdp->id)
4041 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4045 /* do nothing for this command for other mode values */
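/*
 * COMPARE AND WRITE (opcode 0x89): fetch 2 * num blocks from the data-out
 * buffer, compare the first half against the store and, only if every block
 * matches, write the second half. A mismatch yields MISCOMPARE sense data.
 */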
4051 static int resp_comp_write(struct scsi_cmnd *scp,
4052 struct sdebug_dev_info *devip)
4054 u8 *cmd = scp->cmnd;
4056 struct sdeb_store_info *sip = devip2sip(devip, true);
4059 u32 lb_size = sdebug_sector_size;
4064 lba = get_unaligned_be64(cmd + 2);
4065 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4067 return 0; /* degenerate case, not an error */
4068 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4070 mk_sense_invalid_opcode(scp);
4071 return check_condition_result;
4073 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4074 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4075 (cmd[1] & 0xe0) == 0)
4076 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4078 ret = check_device_access_params(scp, lba, num, false);
4082 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4084 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4086 return check_condition_result;
4089 sdeb_write_lock(sip);
4091 ret = do_dout_fetch(scp, dnum, arr);
4093 retval = DID_ERROR << 16;
4095 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4096 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4097 "indicated=%u, IO sent=%d bytes\n", my_name,
4098 dnum * lb_size, ret);
4099 if (!comp_write_worker(sip, lba, num, arr, false)) {
4100 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4101 retval = check_condition_result;
4104 if (scsi_debug_lbp())
4105 map_region(sip, lba, num);
4107 sdeb_write_unlock(sip);
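/*
 * UNMAP: the parameter list is a header followed by 16-byte block
 * descriptors (struct below); each descriptor names an LBA range to be
 * deallocated from the store.
 */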
4112 struct unmap_block_desc {
4118 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4121 struct unmap_block_desc *desc;
4122 struct sdeb_store_info *sip = devip2sip(devip, true);
4123 unsigned int i, payload_len, descriptors;
4126 if (!scsi_debug_lbp())
4127 		return 0;	/* fib and say it's done */
4128 payload_len = get_unaligned_be16(scp->cmnd + 7);
4129 BUG_ON(scsi_bufflen(scp) != payload_len);
4131 descriptors = (payload_len - 8) / 16;
4132 if (descriptors > sdebug_unmap_max_desc) {
4133 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4134 return check_condition_result;
4137 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4139 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4141 return check_condition_result;
4144 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4146 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4147 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4149 desc = (void *)&buf[8];
4151 sdeb_write_lock(sip);
4153 for (i = 0 ; i < descriptors ; i++) {
4154 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4155 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4157 ret = check_device_access_params(scp, lba, num, true);
4161 unmap_region(sip, lba, num);
4167 sdeb_write_unlock(sip);
4173 #define SDEBUG_GET_LBA_STATUS_LEN 32
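/*
 * GET LBA STATUS: report whether the given LBA is mapped or deallocated and
 * how many of the following blocks share that provisioning state.
 */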
4175 static int resp_get_lba_status(struct scsi_cmnd *scp,
4176 struct sdebug_dev_info *devip)
4178 u8 *cmd = scp->cmnd;
4180 u32 alloc_len, mapped, num;
4182 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4184 lba = get_unaligned_be64(cmd + 2);
4185 alloc_len = get_unaligned_be32(cmd + 10);
4190 ret = check_device_access_params(scp, lba, 1, false);
4194 if (scsi_debug_lbp()) {
4195 struct sdeb_store_info *sip = devip2sip(devip, true);
4197 mapped = map_state(sip, lba, &num);
4200 /* following just in case virtual_gb changed */
4201 sdebug_capacity = get_sdebug_capacity();
4202 if (sdebug_capacity - lba <= 0xffffffff)
4203 num = sdebug_capacity - lba;
4208 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4209 put_unaligned_be32(20, arr); /* Parameter Data Length */
4210 put_unaligned_be64(lba, arr + 8); /* LBA */
4211 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4212 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4214 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
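/*
 * SYNCHRONIZE CACHE(10/16): validate the LBA range, then complete
 * immediately if nothing has been written since the last sync or if the
 * IMMED bit is set; otherwise clear write_since_sync so the completion is
 * delayed like a real cache flush.
 */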
4217 static int resp_sync_cache(struct scsi_cmnd *scp,
4218 struct sdebug_dev_info *devip)
4223 u8 *cmd = scp->cmnd;
4225 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4226 lba = get_unaligned_be32(cmd + 2);
4227 num_blocks = get_unaligned_be16(cmd + 7);
4228 } else { /* SYNCHRONIZE_CACHE(16) */
4229 lba = get_unaligned_be64(cmd + 2);
4230 num_blocks = get_unaligned_be32(cmd + 10);
4232 if (lba + num_blocks > sdebug_capacity) {
4233 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4234 return check_condition_result;
4236 if (!write_since_sync || (cmd[1] & 0x2))
4237 res = SDEG_RES_IMMED_MASK;
4238 else /* delay if write_since_sync and IMMED clear */
4239 write_since_sync = false;
4244 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4245  * CONDITION MET if the specified blocks will fit (or have already fitted) in
4246  * the cache, and a GOOD status otherwise. Model a disk with a big cache and
4247  * yield CONDITION MET. This actually tries to bring the given range of main
4248  * memory into the cache associated with the CPU(s).
4250 static int resp_pre_fetch(struct scsi_cmnd *scp,
4251 struct sdebug_dev_info *devip)
4255 u64 block, rest = 0;
4257 u8 *cmd = scp->cmnd;
4258 struct sdeb_store_info *sip = devip2sip(devip, true);
4259 u8 *fsp = sip->storep;
4261 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4262 lba = get_unaligned_be32(cmd + 2);
4263 nblks = get_unaligned_be16(cmd + 7);
4264 } else { /* PRE-FETCH(16) */
4265 lba = get_unaligned_be64(cmd + 2);
4266 nblks = get_unaligned_be32(cmd + 10);
4268 if (lba + nblks > sdebug_capacity) {
4269 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4270 return check_condition_result;
4274 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4275 block = do_div(lba, sdebug_store_sectors);
4276 if (block + nblks > sdebug_store_sectors)
4277 rest = block + nblks - sdebug_store_sectors;
4279 /* Try to bring the PRE-FETCH range into CPU's cache */
4280 sdeb_read_lock(sip);
4281 prefetch_range(fsp + (sdebug_sector_size * block),
4282 (nblks - rest) * sdebug_sector_size);
4284 prefetch_range(fsp, rest * sdebug_sector_size);
4285 sdeb_read_unlock(sip);
4288 res = SDEG_RES_IMMED_MASK;
4289 return res | condition_met_result;
4292 #define RL_BUCKET_ELEMS 8
4294 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4295 * (W-LUN), the normal Linux scanning logic does not associate it with a
4296 * device (e.g. /dev/sg7). The following magic will make that association:
4297 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4298 * where <n> is a host number. If there are multiple targets in a host then
4299  * the above will associate a W-LUN with each target. To get a W-LUN only
4300  * for target 2, use "echo '- 2 49409' > scan".
4302 static int resp_report_luns(struct scsi_cmnd *scp,
4303 struct sdebug_dev_info *devip)
4305 unsigned char *cmd = scp->cmnd;
4306 unsigned int alloc_len;
4307 unsigned char select_report;
4309 struct scsi_lun *lun_p;
4310 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4311 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4312 unsigned int wlun_cnt; /* report luns W-LUN count */
4313 unsigned int tlun_cnt; /* total LUN count */
4314 unsigned int rlen; /* response length (in bytes) */
4316 unsigned int off_rsp = 0;
4317 const int sz_lun = sizeof(struct scsi_lun);
4319 clear_luns_changed_on_target(devip);
4321 select_report = cmd[2];
4322 alloc_len = get_unaligned_be32(cmd + 6);
4324 if (alloc_len < 4) {
4325 pr_err("alloc len too small %d\n", alloc_len);
4326 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4327 return check_condition_result;
4330 switch (select_report) {
4331 case 0: /* all LUNs apart from W-LUNs */
4332 lun_cnt = sdebug_max_luns;
4335 case 1: /* only W-LUNs */
4339 case 2: /* all LUNs */
4340 lun_cnt = sdebug_max_luns;
4343 case 0x10: /* only administrative LUs */
4344 case 0x11: /* see SPC-5 */
4345 	case 0x12: /* only subsidiary LUs owned by referenced LU */
4347 pr_debug("select report invalid %d\n", select_report);
4348 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4349 return check_condition_result;
4352 if (sdebug_no_lun_0 && (lun_cnt > 0))
4355 tlun_cnt = lun_cnt + wlun_cnt;
4356 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4357 scsi_set_resid(scp, scsi_bufflen(scp));
4358 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4359 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4361 	/* loops below rely on the response header being the same size as a LUN entry (both 8 bytes) */
4362 lun = sdebug_no_lun_0 ? 1 : 0;
4363 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4364 memset(arr, 0, sizeof(arr));
4365 lun_p = (struct scsi_lun *)&arr[0];
4367 put_unaligned_be32(rlen, &arr[0]);
4371 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4372 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4374 int_to_scsilun(lun++, lun_p);
4375 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4376 lun_p->scsi_lun[0] |= 0x40;
4378 if (j < RL_BUCKET_ELEMS)
4381 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4387 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4391 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
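/*
 * VERIFY(10/16): BYTCHK=0 claims the medium verification succeeded without
 * touching the store; BYTCHK=1 compares the data-out buffer against the
 * store; BYTCHK=3 compares a single transferred block repeated across the
 * whole range. BYTCHK=2 is rejected as an invalid field.
 */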
4395 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4397 bool is_bytchk3 = false;
4400 u32 vnum, a_num, off;
4401 const u32 lb_size = sdebug_sector_size;
4404 u8 *cmd = scp->cmnd;
4405 struct sdeb_store_info *sip = devip2sip(devip, true);
4407 bytchk = (cmd[1] >> 1) & 0x3;
4409 return 0; /* always claim internal verify okay */
4410 } else if (bytchk == 2) {
4411 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4412 return check_condition_result;
4413 } else if (bytchk == 3) {
4414 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4418 lba = get_unaligned_be64(cmd + 2);
4419 vnum = get_unaligned_be32(cmd + 10);
4421 case VERIFY: /* is VERIFY(10) */
4422 lba = get_unaligned_be32(cmd + 2);
4423 vnum = get_unaligned_be16(cmd + 7);
4426 mk_sense_invalid_opcode(scp);
4427 return check_condition_result;
4430 return 0; /* not an error */
4431 a_num = is_bytchk3 ? 1 : vnum;
4432 /* Treat following check like one for read (i.e. no write) access */
4433 ret = check_device_access_params(scp, lba, a_num, false);
4437 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4439 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4441 return check_condition_result;
4443 /* Not changing store, so only need read access */
4444 sdeb_read_lock(sip);
4446 ret = do_dout_fetch(scp, a_num, arr);
4448 ret = DID_ERROR << 16;
4450 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4451 sdev_printk(KERN_INFO, scp->device,
4452 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4453 my_name, __func__, a_num * lb_size, ret);
4456 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4457 memcpy(arr + off, arr, lb_size);
4460 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4461 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4462 ret = check_condition_result;
4466 sdeb_read_unlock(sip);
4471 #define RZONES_DESC_HD 64
4473 /* Report zones depending on start LBA and reporting options */
4474 static int resp_report_zones(struct scsi_cmnd *scp,
4475 struct sdebug_dev_info *devip)
4477 unsigned int rep_max_zones, nrz = 0;
4479 u32 alloc_len, rep_opts, rep_len;
4482 u8 *arr = NULL, *desc;
4483 u8 *cmd = scp->cmnd;
4484 struct sdeb_zone_state *zsp = NULL;
4485 struct sdeb_store_info *sip = devip2sip(devip, false);
4487 if (!sdebug_dev_is_zoned(devip)) {
4488 mk_sense_invalid_opcode(scp);
4489 return check_condition_result;
4491 zs_lba = get_unaligned_be64(cmd + 2);
4492 alloc_len = get_unaligned_be32(cmd + 10);
4494 return 0; /* not an error */
4495 rep_opts = cmd[14] & 0x3f;
4496 partial = cmd[14] & 0x80;
4498 if (zs_lba >= sdebug_capacity) {
4499 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4500 return check_condition_result;
4503 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4505 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4507 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4509 return check_condition_result;
4512 sdeb_read_lock(sip);
4515 for (lba = zs_lba; lba < sdebug_capacity;
4516 lba = zsp->z_start + zsp->z_size) {
4517 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4519 zsp = zbc_zone(devip, lba);
4526 if (zsp->z_cond != ZC1_EMPTY)
4530 /* Implicit open zones */
4531 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4535 /* Explicit open zones */
4536 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4541 if (zsp->z_cond != ZC4_CLOSED)
4546 if (zsp->z_cond != ZC5_FULL)
4553 * Read-only, offline, reset WP recommended are
4554 * not emulated: no zones to report;
4558 /* non-seq-resource set */
4559 if (!zsp->z_non_seq_resource)
4563 /* All zones except gap zones. */
4564 if (zbc_zone_is_gap(zsp))
4568 /* Not write pointer (conventional) zones */
4569 if (zbc_zone_is_seq(zsp))
4573 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4574 INVALID_FIELD_IN_CDB, 0);
4575 ret = check_condition_result;
4579 if (nrz < rep_max_zones) {
4580 /* Fill zone descriptor */
4581 desc[0] = zsp->z_type;
4582 desc[1] = zsp->z_cond << 4;
4583 if (zsp->z_non_seq_resource)
4585 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4586 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4587 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4591 if (partial && nrz >= rep_max_zones)
4598 /* Zone list length. */
4599 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4601 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4602 /* Zone starting LBA granularity. */
4603 if (devip->zcap < devip->zsize)
4604 put_unaligned_be64(devip->zsize, arr + 16);
4606 rep_len = (unsigned long)desc - (unsigned long)arr;
4607 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4610 sdeb_read_unlock(sip);
4615 /* Logic transplanted from tcmu-runner, file_zbc.c */
4616 static void zbc_open_all(struct sdebug_dev_info *devip)
4618 struct sdeb_zone_state *zsp = &devip->zstate[0];
4621 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4622 if (zsp->z_cond == ZC4_CLOSED)
4623 zbc_open_zone(devip, &devip->zstate[i], true);
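/*
 * OPEN ZONE: with the ALL bit set, explicitly open every closed zone
 * (subject to the max_open limit); otherwise validate that the given LBA is
 * the start of a sequential zone and explicitly open just that zone.
 */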
4627 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4631 enum sdebug_z_cond zc;
4632 u8 *cmd = scp->cmnd;
4633 struct sdeb_zone_state *zsp;
4634 bool all = cmd[14] & 0x01;
4635 struct sdeb_store_info *sip = devip2sip(devip, false);
4637 if (!sdebug_dev_is_zoned(devip)) {
4638 mk_sense_invalid_opcode(scp);
4639 return check_condition_result;
4642 sdeb_write_lock(sip);
4645 /* Check if all closed zones can be open */
4646 if (devip->max_open &&
4647 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4648 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4650 res = check_condition_result;
4653 /* Open all closed zones */
4654 zbc_open_all(devip);
4658 /* Open the specified zone */
4659 z_id = get_unaligned_be64(cmd + 2);
4660 if (z_id >= sdebug_capacity) {
4661 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4662 res = check_condition_result;
4666 zsp = zbc_zone(devip, z_id);
4667 if (z_id != zsp->z_start) {
4668 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4669 res = check_condition_result;
4672 if (zbc_zone_is_conv(zsp)) {
4673 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4674 res = check_condition_result;
4679 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4682 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4683 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4685 res = check_condition_result;
4689 zbc_open_zone(devip, zsp, true);
4691 sdeb_write_unlock(sip);
4695 static void zbc_close_all(struct sdebug_dev_info *devip)
4699 for (i = 0; i < devip->nr_zones; i++)
4700 zbc_close_zone(devip, &devip->zstate[i]);
4703 static int resp_close_zone(struct scsi_cmnd *scp,
4704 struct sdebug_dev_info *devip)
4708 u8 *cmd = scp->cmnd;
4709 struct sdeb_zone_state *zsp;
4710 bool all = cmd[14] & 0x01;
4711 struct sdeb_store_info *sip = devip2sip(devip, false);
4713 if (!sdebug_dev_is_zoned(devip)) {
4714 mk_sense_invalid_opcode(scp);
4715 return check_condition_result;
4718 sdeb_write_lock(sip);
4721 zbc_close_all(devip);
4725 /* Close specified zone */
4726 z_id = get_unaligned_be64(cmd + 2);
4727 if (z_id >= sdebug_capacity) {
4728 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4729 res = check_condition_result;
4733 zsp = zbc_zone(devip, z_id);
4734 if (z_id != zsp->z_start) {
4735 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4736 res = check_condition_result;
4739 if (zbc_zone_is_conv(zsp)) {
4740 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4741 res = check_condition_result;
4745 zbc_close_zone(devip, zsp);
4747 sdeb_write_unlock(sip);
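/*
 * Transition a zone to FULL: close it first if it is implicitly or
 * explicitly open, then move the write pointer to the end of the zone.
 * With "empty" set, an EMPTY zone may also be finished.
 */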
4751 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4752 struct sdeb_zone_state *zsp, bool empty)
4754 enum sdebug_z_cond zc = zsp->z_cond;
4756 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4757 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4758 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4759 zbc_close_zone(devip, zsp);
4760 if (zsp->z_cond == ZC4_CLOSED)
4762 zsp->z_wp = zsp->z_start + zsp->z_size;
4763 zsp->z_cond = ZC5_FULL;
4767 static void zbc_finish_all(struct sdebug_dev_info *devip)
4771 for (i = 0; i < devip->nr_zones; i++)
4772 zbc_finish_zone(devip, &devip->zstate[i], false);
4775 static int resp_finish_zone(struct scsi_cmnd *scp,
4776 struct sdebug_dev_info *devip)
4778 struct sdeb_zone_state *zsp;
4781 u8 *cmd = scp->cmnd;
4782 bool all = cmd[14] & 0x01;
4783 struct sdeb_store_info *sip = devip2sip(devip, false);
4785 if (!sdebug_dev_is_zoned(devip)) {
4786 mk_sense_invalid_opcode(scp);
4787 return check_condition_result;
4790 sdeb_write_lock(sip);
4793 zbc_finish_all(devip);
4797 /* Finish the specified zone */
4798 z_id = get_unaligned_be64(cmd + 2);
4799 if (z_id >= sdebug_capacity) {
4800 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4801 res = check_condition_result;
4805 zsp = zbc_zone(devip, z_id);
4806 if (z_id != zsp->z_start) {
4807 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4808 res = check_condition_result;
4811 if (zbc_zone_is_conv(zsp)) {
4812 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4813 res = check_condition_result;
4817 zbc_finish_zone(devip, zsp, true);
4819 sdeb_write_unlock(sip);
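/*
 * Reset a zone's write pointer: close the zone if it is open, zero the
 * backing store between the zone start and the current write pointer, and
 * return the zone to the EMPTY condition.
 */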
4823 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4824 struct sdeb_zone_state *zsp)
4826 enum sdebug_z_cond zc;
4827 struct sdeb_store_info *sip = devip2sip(devip, false);
4829 if (!zbc_zone_is_seq(zsp))
4833 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4834 zbc_close_zone(devip, zsp);
4836 if (zsp->z_cond == ZC4_CLOSED)
4839 if (zsp->z_wp > zsp->z_start)
4840 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4841 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4843 zsp->z_non_seq_resource = false;
4844 zsp->z_wp = zsp->z_start;
4845 zsp->z_cond = ZC1_EMPTY;
4848 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4852 for (i = 0; i < devip->nr_zones; i++)
4853 zbc_rwp_zone(devip, &devip->zstate[i]);
4856 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4858 struct sdeb_zone_state *zsp;
4861 u8 *cmd = scp->cmnd;
4862 bool all = cmd[14] & 0x01;
4863 struct sdeb_store_info *sip = devip2sip(devip, false);
4865 if (!sdebug_dev_is_zoned(devip)) {
4866 mk_sense_invalid_opcode(scp);
4867 return check_condition_result;
4870 sdeb_write_lock(sip);
4877 z_id = get_unaligned_be64(cmd + 2);
4878 if (z_id >= sdebug_capacity) {
4879 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4880 res = check_condition_result;
4884 zsp = zbc_zone(devip, z_id);
4885 if (z_id != zsp->z_start) {
4886 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4887 res = check_condition_result;
4890 if (zbc_zone_is_conv(zsp)) {
4891 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4892 res = check_condition_result;
4896 zbc_rwp_zone(devip, zsp);
4898 sdeb_write_unlock(sip);
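/*
 * Map a command to its per-hardware-queue context using the block layer's
 * unique tag; the upper bits of the tag encode the hardware queue index.
 */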
4902 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4905 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4907 hwq = blk_mq_unique_tag_to_hwq(tag);
4909 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4910 if (WARN_ON_ONCE(hwq >= submit_queues))
4913 return sdebug_q_arr + hwq;
4916 static u32 get_tag(struct scsi_cmnd *cmnd)
4918 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4921 /* Queued (deferred) command completions converge here. */
4922 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4924 bool aborted = sd_dp->aborted;
4927 unsigned long iflags;
4928 struct sdebug_queue *sqp;
4929 struct sdebug_queued_cmd *sqcp;
4930 struct scsi_cmnd *scp;
4931 struct sdebug_dev_info *devip;
4933 if (unlikely(aborted))
4934 sd_dp->aborted = false;
4935 qc_idx = sd_dp->qc_idx;
4936 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4937 if (sdebug_statistics) {
4938 atomic_inc(&sdebug_completions);
4939 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4940 atomic_inc(&sdebug_miss_cpus);
4942 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4943 pr_err("wild qc_idx=%d\n", qc_idx);
4946 spin_lock_irqsave(&sqp->qc_lock, iflags);
4947 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4948 sqcp = &sqp->qc_arr[qc_idx];
4950 if (unlikely(scp == NULL)) {
4951 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4952 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4953 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4956 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4958 atomic_dec(&devip->num_in_q);
4960 pr_err("devip=NULL\n");
4961 if (unlikely(atomic_read(&retired_max_queue) > 0))
4964 sqcp->a_cmnd = NULL;
4965 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4966 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4967 pr_err("Unexpected completion\n");
4971 if (unlikely(retiring)) { /* user has reduced max_queue */
4974 retval = atomic_read(&retired_max_queue);
4975 if (qc_idx >= retval) {
4976 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4977 pr_err("index %d too large\n", retval);
4980 k = find_last_bit(sqp->in_use_bm, retval);
4981 if ((k < sdebug_max_queue) || (k == retval))
4982 atomic_set(&retired_max_queue, 0);
4984 atomic_set(&retired_max_queue, k + 1);
4986 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4987 if (unlikely(aborted)) {
4989 pr_info("bypassing scsi_done() due to aborted cmd\n");
4992 scsi_done(scp); /* callback to mid level */
4995 /* When high resolution timer goes off this function is called. */
4996 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4998 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5000 sdebug_q_cmd_complete(sd_dp);
5001 return HRTIMER_NORESTART;
5004 /* When work queue schedules work, it calls this function. */
5005 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5007 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5009 sdebug_q_cmd_complete(sd_dp);
5012 static bool got_shared_uuid;
5013 static uuid_t shared_uuid;
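/*
 * Build the emulated zone layout for a ZBC device: derive the zone size and
 * capacity, work out the number of conventional, sequential and gap zones,
 * then allocate and initialize the per-zone state array.
 */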
5015 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5017 struct sdeb_zone_state *zsp;
5018 sector_t capacity = get_sdebug_capacity();
5019 sector_t conv_capacity;
5020 sector_t zstart = 0;
5024 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5025 * a zone size allowing for at least 4 zones on the device. Otherwise,
5026 * use the specified zone size checking that at least 2 zones can be
5027 * created for the device.
5029 if (!sdeb_zbc_zone_size_mb) {
5030 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5031 >> ilog2(sdebug_sector_size);
5032 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5034 if (devip->zsize < 2) {
5035 pr_err("Device capacity too small\n");
5039 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5040 pr_err("Zone size is not a power of 2\n");
5043 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5044 >> ilog2(sdebug_sector_size);
5045 if (devip->zsize >= capacity) {
5046 pr_err("Zone size too large for device capacity\n");
5051 devip->zsize_shift = ilog2(devip->zsize);
5052 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5054 if (sdeb_zbc_zone_cap_mb == 0) {
5055 devip->zcap = devip->zsize;
5057 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5058 ilog2(sdebug_sector_size);
5059 if (devip->zcap > devip->zsize) {
5060 pr_err("Zone capacity too large\n");
5065 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5066 if (conv_capacity >= capacity) {
5067 pr_err("Number of conventional zones too large\n");
5070 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5071 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5073 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5075 /* Add gap zones if zone capacity is smaller than the zone size */
5076 if (devip->zcap < devip->zsize)
5077 devip->nr_zones += devip->nr_seq_zones;
5079 if (devip->zmodel == BLK_ZONED_HM) {
5080 /* zbc_max_open_zones can be 0, meaning "not reported" */
5081 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5082 devip->max_open = (devip->nr_zones - 1) / 2;
5084 devip->max_open = sdeb_zbc_max_open;
5087 devip->zstate = kcalloc(devip->nr_zones,
5088 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5092 for (i = 0; i < devip->nr_zones; i++) {
5093 zsp = &devip->zstate[i];
5095 zsp->z_start = zstart;
5097 if (i < devip->nr_conv_zones) {
5098 zsp->z_type = ZBC_ZTYPE_CNV;
5099 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5100 zsp->z_wp = (sector_t)-1;
5102 min_t(u64, devip->zsize, capacity - zstart);
5103 } else if ((zstart & (devip->zsize - 1)) == 0) {
5104 if (devip->zmodel == BLK_ZONED_HM)
5105 zsp->z_type = ZBC_ZTYPE_SWR;
5107 zsp->z_type = ZBC_ZTYPE_SWP;
5108 zsp->z_cond = ZC1_EMPTY;
5109 zsp->z_wp = zsp->z_start;
5111 min_t(u64, devip->zcap, capacity - zstart);
5113 zsp->z_type = ZBC_ZTYPE_GAP;
5114 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5115 zsp->z_wp = (sector_t)-1;
5116 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5120 WARN_ON_ONCE((int)zsp->z_size <= 0);
5121 zstart += zsp->z_size;
5127 static struct sdebug_dev_info *sdebug_device_create(
5128 struct sdebug_host_info *sdbg_host, gfp_t flags)
5130 struct sdebug_dev_info *devip;
5132 devip = kzalloc(sizeof(*devip), flags);
5134 if (sdebug_uuid_ctl == 1)
5135 uuid_gen(&devip->lu_name);
5136 else if (sdebug_uuid_ctl == 2) {
5137 if (got_shared_uuid)
5138 devip->lu_name = shared_uuid;
5140 uuid_gen(&shared_uuid);
5141 got_shared_uuid = true;
5142 devip->lu_name = shared_uuid;
5145 devip->sdbg_host = sdbg_host;
5146 if (sdeb_zbc_in_use) {
5147 devip->zmodel = sdeb_zbc_model;
5148 if (sdebug_device_create_zones(devip)) {
5153 devip->zmodel = BLK_ZONED_NONE;
5155 devip->sdbg_host = sdbg_host;
5156 devip->create_ts = ktime_get_boottime();
5157 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5158 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
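/*
 * Find the sdebug_dev_info matching sdev's <channel, target, lun>; if none
 * matches, reuse an unused entry or create a new one on the host's list.
 */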
5163 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5165 struct sdebug_host_info *sdbg_host;
5166 struct sdebug_dev_info *open_devip = NULL;
5167 struct sdebug_dev_info *devip;
5169 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5171 pr_err("Host info NULL\n");
5175 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5176 if ((devip->used) && (devip->channel == sdev->channel) &&
5177 (devip->target == sdev->id) &&
5178 (devip->lun == sdev->lun))
5181 if ((!devip->used) && (!open_devip))
5185 if (!open_devip) { /* try and make a new one */
5186 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5188 pr_err("out of memory at line %d\n", __LINE__);
5193 open_devip->channel = sdev->channel;
5194 open_devip->target = sdev->id;
5195 open_devip->lun = sdev->lun;
5196 open_devip->sdbg_host = sdbg_host;
5197 atomic_set(&open_devip->num_in_q, 0);
5198 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5199 open_devip->used = true;
5203 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5206 pr_info("slave_alloc <%u %u %u %llu>\n",
5207 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5211 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5213 struct sdebug_dev_info *devip =
5214 (struct sdebug_dev_info *)sdp->hostdata;
5217 pr_info("slave_configure <%u %u %u %llu>\n",
5218 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5219 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5220 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5221 if (devip == NULL) {
5222 devip = find_build_dev_info(sdp);
5224 return 1; /* no resources, will be marked offline */
5226 sdp->hostdata = devip;
5228 sdp->no_uld_attach = 1;
5229 config_cdb_len(sdp);
5233 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5235 struct sdebug_dev_info *devip =
5236 (struct sdebug_dev_info *)sdp->hostdata;
5239 pr_info("slave_destroy <%u %u %u %llu>\n",
5240 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5242 /* make this slot available for re-use */
5243 devip->used = false;
5244 sdp->hostdata = NULL;
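/*
 * Cancel the high-resolution timer or the work item backing a deferred
 * command, depending on how its completion was deferred.
 */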
5248 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5249 enum sdeb_defer_type defer_t)
5253 if (defer_t == SDEB_DEFER_HRT)
5254 hrtimer_cancel(&sd_dp->hrt);
5255 else if (defer_t == SDEB_DEFER_WQ)
5256 cancel_work_sync(&sd_dp->ew.work);
5259 /* If @cmnd is found, deletes its timer or work queue and returns true; else returns false. */
5261 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5263 unsigned long iflags;
5264 int j, k, qmax, r_qmax;
5265 enum sdeb_defer_type l_defer_t;
5266 struct sdebug_queue *sqp;
5267 struct sdebug_queued_cmd *sqcp;
5268 struct sdebug_dev_info *devip;
5269 struct sdebug_defer *sd_dp;
5271 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5272 spin_lock_irqsave(&sqp->qc_lock, iflags);
5273 qmax = sdebug_max_queue;
5274 r_qmax = atomic_read(&retired_max_queue);
5277 for (k = 0; k < qmax; ++k) {
5278 if (test_bit(k, sqp->in_use_bm)) {
5279 sqcp = &sqp->qc_arr[k];
5280 if (cmnd != sqcp->a_cmnd)
5283 devip = (struct sdebug_dev_info *)
5284 cmnd->device->hostdata;
5286 atomic_dec(&devip->num_in_q);
5287 sqcp->a_cmnd = NULL;
5288 sd_dp = sqcp->sd_dp;
5290 l_defer_t = READ_ONCE(sd_dp->defer_t);
5291 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5293 l_defer_t = SDEB_DEFER_NONE;
5294 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5295 stop_qc_helper(sd_dp, l_defer_t);
5296 clear_bit(k, sqp->in_use_bm);
5300 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5305 /* Deletes (stops) timers or work queues of all queued commands */
5306 static void stop_all_queued(void)
5308 unsigned long iflags;
5310 enum sdeb_defer_type l_defer_t;
5311 struct sdebug_queue *sqp;
5312 struct sdebug_queued_cmd *sqcp;
5313 struct sdebug_dev_info *devip;
5314 struct sdebug_defer *sd_dp;
5316 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5317 spin_lock_irqsave(&sqp->qc_lock, iflags);
5318 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5319 if (test_bit(k, sqp->in_use_bm)) {
5320 sqcp = &sqp->qc_arr[k];
5321 if (sqcp->a_cmnd == NULL)
5323 devip = (struct sdebug_dev_info *)
5324 sqcp->a_cmnd->device->hostdata;
5326 atomic_dec(&devip->num_in_q);
5327 sqcp->a_cmnd = NULL;
5328 sd_dp = sqcp->sd_dp;
5330 l_defer_t = READ_ONCE(sd_dp->defer_t);
5331 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5333 l_defer_t = SDEB_DEFER_NONE;
5334 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5335 stop_qc_helper(sd_dp, l_defer_t);
5336 clear_bit(k, sqp->in_use_bm);
5337 spin_lock_irqsave(&sqp->qc_lock, iflags);
5340 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5344 /* Free queued command memory on heap */
5345 static void free_all_queued(void)
5348 struct sdebug_queue *sqp;
5349 struct sdebug_queued_cmd *sqcp;
5351 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5352 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5353 sqcp = &sqp->qc_arr[k];
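/*
 * Error handler entry points: abort stops the queued command's timer or
 * work item, while the reset handlers set the appropriate unit attention
 * bit(s) on the affected device(s).
 */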
5360 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5366 ok = stop_queued_cmnd(SCpnt);
5367 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5368 sdev_printk(KERN_INFO, SCpnt->device,
5369 "%s: command%s found\n", __func__,
5375 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5378 if (SCpnt && SCpnt->device) {
5379 struct scsi_device *sdp = SCpnt->device;
5380 struct sdebug_dev_info *devip =
5381 (struct sdebug_dev_info *)sdp->hostdata;
5383 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5384 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5386 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5391 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5393 struct sdebug_host_info *sdbg_host;
5394 struct sdebug_dev_info *devip;
5395 struct scsi_device *sdp;
5396 struct Scsi_Host *hp;
5399 ++num_target_resets;
5402 sdp = SCpnt->device;
5405 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5406 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5410 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5412 list_for_each_entry(devip,
5413 &sdbg_host->dev_info_list,
5415 if (devip->target == sdp->id) {
5416 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5420 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5421 sdev_printk(KERN_INFO, sdp,
5422 "%s: %d device(s) found in target\n", __func__, k);
5427 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5429 struct sdebug_host_info *sdbg_host;
5430 struct sdebug_dev_info *devip;
5431 struct scsi_device *sdp;
5432 struct Scsi_Host *hp;
5436 if (!(SCpnt && SCpnt->device))
5438 sdp = SCpnt->device;
5439 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5440 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5443 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5445 list_for_each_entry(devip,
5446 &sdbg_host->dev_info_list,
5448 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5453 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5454 sdev_printk(KERN_INFO, sdp,
5455 "%s: %d device(s) found in host\n", __func__, k);
5460 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5462 struct sdebug_host_info *sdbg_host;
5463 struct sdebug_dev_info *devip;
5467 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5468 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5469 spin_lock(&sdebug_host_list_lock);
5470 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5471 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5473 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5477 spin_unlock(&sdebug_host_list_lock);
5479 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5480 sdev_printk(KERN_INFO, SCpnt->device,
5481 "%s: %d device(s) found\n", __func__, k);
5485 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5487 struct msdos_partition *pp;
5488 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5489 int sectors_per_part, num_sectors, k;
5490 int heads_by_sects, start_sec, end_sec;
5492 /* assume partition table already zeroed */
5493 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5495 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5496 sdebug_num_parts = SDEBUG_MAX_PARTS;
5497 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5499 num_sectors = (int)get_sdebug_capacity();
5500 sectors_per_part = (num_sectors - sdebug_sectors_per)
5502 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5503 starts[0] = sdebug_sectors_per;
5504 max_part_secs = sectors_per_part;
5505 for (k = 1; k < sdebug_num_parts; ++k) {
5506 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5508 if (starts[k] - starts[k - 1] < max_part_secs)
5509 max_part_secs = starts[k] - starts[k - 1];
5511 starts[sdebug_num_parts] = num_sectors;
5512 starts[sdebug_num_parts + 1] = 0;
5514 ramp[510] = 0x55; /* magic partition markings */
5516 pp = (struct msdos_partition *)(ramp + 0x1be);
5517 for (k = 0; starts[k + 1]; ++k, ++pp) {
5518 start_sec = starts[k];
5519 end_sec = starts[k] + max_part_secs - 1;
5522 pp->cyl = start_sec / heads_by_sects;
5523 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5524 / sdebug_sectors_per;
5525 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5527 pp->end_cyl = end_sec / heads_by_sects;
5528 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5529 / sdebug_sectors_per;
5530 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5532 pp->start_sect = cpu_to_le32(start_sec);
5533 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5534 pp->sys_ind = 0x83; /* plain Linux partition */
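/*
 * Set or clear the "blocked" flag on every submission queue; while a queue
 * is blocked, schedule_resp() returns SCSI_MLQUEUE_HOST_BUSY for new
 * commands.
 */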
5538 static void block_unblock_all_queues(bool block)
5541 struct sdebug_queue *sqp;
5543 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5544 atomic_set(&sqp->blocked, (int)block);
5547 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5548 * commands will be processed normally before triggers occur.
5550 static void tweak_cmnd_count(void)
5554 modulo = abs(sdebug_every_nth);
5557 block_unblock_all_queues(true);
5558 count = atomic_read(&sdebug_cmnd_count);
5559 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5560 block_unblock_all_queues(false);
5563 static void clear_queue_stats(void)
5565 atomic_set(&sdebug_cmnd_count, 0);
5566 atomic_set(&sdebug_completions, 0);
5567 atomic_set(&sdebug_miss_cpus, 0);
5568 atomic_set(&sdebug_a_tsf, 0);
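/*
 * True when the running command count is a multiple of |every_nth|, i.e.
 * when an injected error or condition should trigger on this command.
 */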
5571 static bool inject_on_this_cmd(void)
5573 if (sdebug_every_nth == 0)
5575 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5578 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5580 /* Complete the processing of the thread that queued a SCSI command to this
5581  * driver. It either completes the command by calling scsi_done() or
5582 * schedules a hr timer or work queue then returns 0. Returns
5583 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5585 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5587 int (*pfp)(struct scsi_cmnd *,
5588 struct sdebug_dev_info *),
5589 int delta_jiff, int ndelay)
5592 bool inject = false;
5593 bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5594 int k, num_in_q, qdepth;
5595 unsigned long iflags;
5596 u64 ns_from_boot = 0;
5597 struct sdebug_queue *sqp;
5598 struct sdebug_queued_cmd *sqcp;
5599 struct scsi_device *sdp;
5600 struct sdebug_defer *sd_dp;
5602 if (unlikely(devip == NULL)) {
5603 if (scsi_result == 0)
5604 scsi_result = DID_NO_CONNECT << 16;
5605 goto respond_in_thread;
5609 if (delta_jiff == 0)
5610 goto respond_in_thread;
5612 sqp = get_queue(cmnd);
5613 spin_lock_irqsave(&sqp->qc_lock, iflags);
5614 if (unlikely(atomic_read(&sqp->blocked))) {
5615 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5616 return SCSI_MLQUEUE_HOST_BUSY;
5618 num_in_q = atomic_read(&devip->num_in_q);
5619 qdepth = cmnd->device->queue_depth;
5620 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5622 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5623 goto respond_in_thread;
5625 scsi_result = device_qfull_result;
5626 } else if (unlikely(sdebug_every_nth &&
5627 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5628 (scsi_result == 0))) {
5629 if ((num_in_q == (qdepth - 1)) &&
5630 (atomic_inc_return(&sdebug_a_tsf) >=
5631 abs(sdebug_every_nth))) {
5632 atomic_set(&sdebug_a_tsf, 0);
5634 scsi_result = device_qfull_result;
5638 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5639 if (unlikely(k >= sdebug_max_queue)) {
5640 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5642 goto respond_in_thread;
5643 scsi_result = device_qfull_result;
5644 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5645 sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5646 __func__, sdebug_max_queue);
5647 goto respond_in_thread;
5649 set_bit(k, sqp->in_use_bm);
5650 atomic_inc(&devip->num_in_q);
5651 sqcp = &sqp->qc_arr[k];
5652 sqcp->a_cmnd = cmnd;
5653 cmnd->host_scribble = (unsigned char *)sqcp;
5654 sd_dp = sqcp->sd_dp;
5655 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5658 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5660 atomic_dec(&devip->num_in_q);
5661 clear_bit(k, sqp->in_use_bm);
5662 return SCSI_MLQUEUE_HOST_BUSY;
5669 /* Set the hostwide tag */
5670 if (sdebug_host_max_queue)
5671 sd_dp->hc_idx = get_tag(cmnd);
5674 ns_from_boot = ktime_get_boottime_ns();
5676 /* one of the resp_*() response functions is called here */
5677 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5678 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5679 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5680 delta_jiff = ndelay = 0;
5682 if (cmnd->result == 0 && scsi_result != 0)
5683 cmnd->result = scsi_result;
5684 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5685 if (atomic_read(&sdeb_inject_pending)) {
5686 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5687 atomic_set(&sdeb_inject_pending, 0);
5688 cmnd->result = check_condition_result;
5692 if (unlikely(sdebug_verbose && cmnd->result))
5693 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5694 __func__, cmnd->result);
5696 if (delta_jiff > 0 || ndelay > 0) {
5699 if (delta_jiff > 0) {
5700 u64 ns = jiffies_to_nsecs(delta_jiff);
5702 if (sdebug_random && ns < U32_MAX) {
5703 ns = get_random_u32_below((u32)ns);
5704 } else if (sdebug_random) {
5705 ns >>= 12; /* scale to 4 usec precision */
5706 if (ns < U32_MAX) /* over 4 hours max */
5707 ns = get_random_u32_below((u32)ns);
5710 kt = ns_to_ktime(ns);
5711 } else { /* ndelay has a 4.2 second max */
5712 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5714 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5715 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5717 if (kt <= d) { /* elapsed duration >= kt */
5718 spin_lock_irqsave(&sqp->qc_lock, iflags);
5719 sqcp->a_cmnd = NULL;
5720 atomic_dec(&devip->num_in_q);
5721 clear_bit(k, sqp->in_use_bm);
5722 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5725 /* call scsi_done() from this thread */
5729 /* otherwise reduce kt by elapsed time */
5734 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5735 spin_lock_irqsave(&sqp->qc_lock, iflags);
5736 if (!sd_dp->init_poll) {
5737 sd_dp->init_poll = true;
5738 sqcp->sd_dp = sd_dp;
5739 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5742 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5743 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5745 if (!sd_dp->init_hrt) {
5746 sd_dp->init_hrt = true;
5747 sqcp->sd_dp = sd_dp;
5748 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5749 HRTIMER_MODE_REL_PINNED);
5750 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5751 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5754 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5755 /* schedule the invocation of scsi_done() for a later time */
5756 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5758 if (sdebug_statistics)
5759 sd_dp->issuing_cpu = raw_smp_processor_id();
5760 } else { /* jdelay < 0, use work queue */
5761 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5762 atomic_read(&sdeb_inject_pending)))
5763 sd_dp->aborted = true;
5765 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5766 spin_lock_irqsave(&sqp->qc_lock, iflags);
5767 if (!sd_dp->init_poll) {
5768 sd_dp->init_poll = true;
5769 sqcp->sd_dp = sd_dp;
5770 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5773 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5774 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5776 if (!sd_dp->init_wq) {
5777 sd_dp->init_wq = true;
5778 sqcp->sd_dp = sd_dp;
5779 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5781 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5783 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5784 schedule_work(&sd_dp->ew.work);
5786 if (sdebug_statistics)
5787 sd_dp->issuing_cpu = raw_smp_processor_id();
5788 if (unlikely(sd_dp->aborted)) {
5789 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5790 scsi_cmd_to_rq(cmnd)->tag);
5791 blk_abort_request(scsi_cmd_to_rq(cmnd));
5792 atomic_set(&sdeb_inject_pending, 0);
5793 sd_dp->aborted = false;
5796 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5797 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5798 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5801 respond_in_thread: /* call back to mid-layer using invocation thread */
5802 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5803 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5804 if (cmnd->result == 0 && scsi_result != 0)
5805 cmnd->result = scsi_result;
5810 /* Note: The following macros create attribute files in the
5811    /sys/module/scsi_debug/parameters directory. Unfortunately this
5812    driver is unaware of changes made via those files and cannot trigger
5813    the auxiliary actions that it can when the corresponding attribute in the
5814    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5816 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5817 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5818 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5819 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5820 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5821 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5822 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5823 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5824 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5825 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5826 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5827 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5828 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5829 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5830 module_param_string(inq_product, sdebug_inq_product_id,
5831 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5832 module_param_string(inq_rev, sdebug_inq_product_rev,
5833 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5834 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5835 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5836 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5837 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5838 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5839 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5840 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5841 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5842 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5843 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5844 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5846 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5848 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5849 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5850 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5851 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5852 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5853 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5854 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5855 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5856 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5857 module_param_named(per_host_store, sdebug_per_host_store, bool,
5859 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5860 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5861 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5862 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5863 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5864 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5865 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5866 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5867 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5868 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5869 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5870 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5871 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5872 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5873 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5874 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5875 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5876 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5878 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5879 module_param_named(write_same_length, sdebug_write_same_length, int,
5881 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5882 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5883 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5884 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5885 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5887 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5888 MODULE_DESCRIPTION("SCSI debug adapter driver");
5889 MODULE_LICENSE("GPL");
5890 MODULE_VERSION(SDEBUG_VERSION);
5892 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5893 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5894 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5895 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5896 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5897 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5898 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5899 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5900 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5901 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5902 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5903 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5904 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5905 MODULE_PARM_DESC(host_max_queue,
5906 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5907 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5908 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5909 SDEBUG_VERSION "\")");
5910 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5911 MODULE_PARM_DESC(lbprz,
5912 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5913 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5914 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5915 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5916 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5917 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5918 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5919 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5920 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5921 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5922 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5923 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5924 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5925 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5926 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5927 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5928 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5929 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5930 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5931 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5932 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5933 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5934 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5935 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5936 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5937 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5938 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5939 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5940 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5941 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5942 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5943 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5944 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5945 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
5946 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5947 MODULE_PARM_DESC(uuid_ctl,
5948 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5949 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5950 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5951 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5952 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5953 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5954 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5955 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5956 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5957 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
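/*
 * A minimal load-time sketch (hypothetical values) using the parameters
 * described above:
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 sector_size=4096
 *
 * Once loaded, the same parameters appear under
 * /sys/module/scsi_debug/parameters/.
 */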
5959 #define SDEBUG_INFO_LEN 256
5960 static char sdebug_info[SDEBUG_INFO_LEN];
5962 static const char *scsi_debug_info(struct Scsi_Host *shp)
5966 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5967 my_name, SDEBUG_VERSION, sdebug_version_date);
5968 if (k >= (SDEBUG_INFO_LEN - 1))
5970 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5971 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5972 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5973 "statistics", (int)sdebug_statistics);
5977 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
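/* For example, 'echo 1 > /proc/scsi/scsi_debug/0' (assuming host_id 0) sets
 * the SDEBUG_OPT_NOISE bit ("1->noise" above) and so enables verbose logging. */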
5978 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5983 int minLen = length > 15 ? 15 : length;
5985 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5987 memcpy(arr, buffer, minLen);
5989 if (1 != sscanf(arr, "%d", &opts))
5992 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5993 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5994 if (sdebug_every_nth != 0)
5999 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6000 * same for each scsi_debug host (if more than one). Some of the counters
6001 * output are not atomic, so they may be inaccurate on a busy system. */
6002 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6005 struct sdebug_queue *sqp;
6006 struct sdebug_host_info *sdhp;
6008 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6009 SDEBUG_VERSION, sdebug_version_date);
6010 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6011 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6012 sdebug_opts, sdebug_every_nth);
6013 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6014 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6015 sdebug_sector_size, "bytes");
6016 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6017 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6019 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6020 num_dev_resets, num_target_resets, num_bus_resets,
6022 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6023 dix_reads, dix_writes, dif_errors);
6024 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6026 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6027 atomic_read(&sdebug_cmnd_count),
6028 atomic_read(&sdebug_completions),
6029 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6030 atomic_read(&sdebug_a_tsf),
6031 atomic_read(&sdeb_mq_poll_count));
6033 seq_printf(m, "submit_queues=%d\n", submit_queues);
6034 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6035 seq_printf(m, " queue %d:\n", j);
6036 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6037 if (f != sdebug_max_queue) {
6038 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6039 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6040 "first,last bits", f, l);
6044 seq_printf(m, "this host_no=%d\n", host->host_no);
6045 if (!xa_empty(per_store_ap)) {
6048 unsigned long l_idx;
6049 struct sdeb_store_info *sip;
6051 seq_puts(m, "\nhost list:\n");
6053 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6055 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6056 sdhp->shost->host_no, idx);
6059 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6060 sdeb_most_recent_idx);
6062 xa_for_each(per_store_ap, l_idx, sip) {
6063 niu = xa_get_mark(per_store_ap, l_idx,
6064 SDEB_XA_NOT_IN_USE);
6066 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6067 (niu ? " not_in_use" : ""));
6074 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6076 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6078 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6079 * of delay is jiffies.
6081 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6086 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6088 if (sdebug_jdelay != jdelay) {
6090 struct sdebug_queue *sqp;
6092 block_unblock_all_queues(true);
6093 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6095 k = find_first_bit(sqp->in_use_bm,
6097 if (k != sdebug_max_queue) {
6098 res = -EBUSY; /* queued commands */
6103 sdebug_jdelay = jdelay;
6106 block_unblock_all_queues(false);
6112 static DRIVER_ATTR_RW(delay);
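/* A usage sketch (value illustrative):
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * selects the tiny (-1) response delay described for the 'delay' parameter
 * above. */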
6114 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6116 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6118 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6119 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6120 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6125 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6126 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6128 if (sdebug_ndelay != ndelay) {
6130 struct sdebug_queue *sqp;
6132 block_unblock_all_queues(true);
6133 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6135 k = find_first_bit(sqp->in_use_bm,
6137 if (k != sdebug_max_queue) {
6138 res = -EBUSY; /* queued commands */
6143 sdebug_ndelay = ndelay;
6144 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6147 block_unblock_all_queues(false);
6153 static DRIVER_ATTR_RW(ndelay);
6155 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6157 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6160 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6166 if (sscanf(buf, "%10s", work) == 1) {
6167 if (strncasecmp(work, "0x", 2) == 0) {
6168 if (kstrtoint(work + 2, 16, &opts) == 0)
6171 if (kstrtoint(work, 10, &opts) == 0)
6178 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6179 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6183 static DRIVER_ATTR_RW(opts);
6185 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6187 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6189 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6194 /* Cannot change from or to TYPE_ZBC with sysfs */
6195 if (sdebug_ptype == TYPE_ZBC)
6198 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6206 static DRIVER_ATTR_RW(ptype);
6208 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6210 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6212 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6217 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6223 static DRIVER_ATTR_RW(dsense);
6225 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6227 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6229 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6234 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6235 bool want_store = (n == 0);
6236 struct sdebug_host_info *sdhp;
6239 sdebug_fake_rw = (sdebug_fake_rw > 0);
6240 if (sdebug_fake_rw == n)
6241 return count; /* not transitioning so do nothing */
6243 if (want_store) { /* 1 --> 0 transition, set up store */
6244 if (sdeb_first_idx < 0) {
6245 idx = sdebug_add_store();
6249 idx = sdeb_first_idx;
6250 xa_clear_mark(per_store_ap, idx,
6251 SDEB_XA_NOT_IN_USE);
6253 /* make all hosts use same store */
6254 list_for_each_entry(sdhp, &sdebug_host_list,
6256 if (sdhp->si_idx != idx) {
6257 xa_set_mark(per_store_ap, sdhp->si_idx,
6258 SDEB_XA_NOT_IN_USE);
6262 sdeb_most_recent_idx = idx;
6263 } else { /* 0 --> 1 transition is trigger for shrink */
6264 sdebug_erase_all_stores(true /* apart from first */);
6271 static DRIVER_ATTR_RW(fake_rw);
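/* Summary of the transitions handled above: writing 0 while fake_rw is 1
 * attaches all hosts to a shared backing store (creating one if none exists
 * yet), whereas writing 1 while it is 0 erases every store apart from the
 * first. */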
6273 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6275 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6277 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6282 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6283 sdebug_no_lun_0 = n;
6288 static DRIVER_ATTR_RW(no_lun_0);
6290 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6292 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6294 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6299 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6300 sdebug_num_tgts = n;
6301 sdebug_max_tgts_luns();
6306 static DRIVER_ATTR_RW(num_tgts);
6308 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6310 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6312 static DRIVER_ATTR_RO(dev_size_mb);
6314 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6316 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6319 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6324 if (kstrtobool(buf, &v))
6327 sdebug_per_host_store = v;
6330 static DRIVER_ATTR_RW(per_host_store);
6332 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6334 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6336 static DRIVER_ATTR_RO(num_parts);
6338 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6340 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6342 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6348 if (sscanf(buf, "%10s", work) == 1) {
6349 if (strncasecmp(work, "0x", 2) == 0) {
6350 if (kstrtoint(work + 2, 16, &nth) == 0)
6351 goto every_nth_done;
6353 if (kstrtoint(work, 10, &nth) == 0)
6354 goto every_nth_done;
6360 sdebug_every_nth = nth;
6361 if (nth && !sdebug_statistics) {
6362 pr_info("every_nth needs statistics=1, set it\n");
6363 sdebug_statistics = true;
6368 static DRIVER_ATTR_RW(every_nth);
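/* An injection sketch combining every_nth with opts (values illustrative):
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 * With the timeout bit (4) set in opts, roughly every 100th command is
 * ignored so that it appears to time out; see fake_timeout() below. */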
6370 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6372 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6374 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6380 if (kstrtoint(buf, 0, &n))
6383 if (n > (int)SAM_LUN_AM_FLAT) {
6384 pr_warn("only LUN address methods 0 and 1 are supported\n");
6387 changed = ((int)sdebug_lun_am != n);
6389 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6390 struct sdebug_host_info *sdhp;
6391 struct sdebug_dev_info *dp;
6393 spin_lock(&sdebug_host_list_lock);
6394 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6395 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6396 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6399 spin_unlock(&sdebug_host_list_lock);
6405 static DRIVER_ATTR_RW(lun_format);
6407 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6409 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6411 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6417 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6419 pr_warn("max_luns can be no more than 256\n");
6422 changed = (sdebug_max_luns != n);
6423 sdebug_max_luns = n;
6424 sdebug_max_tgts_luns();
6425 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6426 struct sdebug_host_info *sdhp;
6427 struct sdebug_dev_info *dp;
6429 spin_lock(&sdebug_host_list_lock);
6430 list_for_each_entry(sdhp, &sdebug_host_list,
6432 list_for_each_entry(dp, &sdhp->dev_info_list,
6434 set_bit(SDEBUG_UA_LUNS_CHANGED,
6438 spin_unlock(&sdebug_host_list_lock);
6444 static DRIVER_ATTR_RW(max_luns);
6446 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6448 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6450 /* N.B. max_queue can be changed while there are queued commands. In flight
6451 * commands beyond the new max_queue will be completed. */
6452 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6456 struct sdebug_queue *sqp;
6458 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6459 (n <= SDEBUG_CANQUEUE) &&
6460 (sdebug_host_max_queue == 0)) {
6461 block_unblock_all_queues(true);
6463 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6465 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6469 sdebug_max_queue = n;
6470 if (k == SDEBUG_CANQUEUE)
6471 atomic_set(&retired_max_queue, 0);
6473 atomic_set(&retired_max_queue, k + 1);
6475 atomic_set(&retired_max_queue, 0);
6476 block_unblock_all_queues(false);
6481 static DRIVER_ATTR_RW(max_queue);
6483 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6485 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6488 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6490 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6493 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6497 if (kstrtobool(buf, &v))
6500 sdebug_no_rwlock = v;
6503 static DRIVER_ATTR_RW(no_rwlock);
6506 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6507 * in range [0, sdebug_host_max_queue), we can't change it.
6509 static DRIVER_ATTR_RO(host_max_queue);
6511 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6513 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6515 static DRIVER_ATTR_RO(no_uld);
6517 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6519 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6521 static DRIVER_ATTR_RO(scsi_level);
6523 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6525 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6527 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6533 /* Ignore capacity change for ZBC drives for now */
6534 if (sdeb_zbc_in_use)
6537 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6538 changed = (sdebug_virtual_gb != n);
6539 sdebug_virtual_gb = n;
6540 sdebug_capacity = get_sdebug_capacity();
6542 struct sdebug_host_info *sdhp;
6543 struct sdebug_dev_info *dp;
6545 spin_lock(&sdebug_host_list_lock);
6546 list_for_each_entry(sdhp, &sdebug_host_list,
6548 list_for_each_entry(dp, &sdhp->dev_info_list,
6550 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6554 spin_unlock(&sdebug_host_list_lock);
6560 static DRIVER_ATTR_RW(virtual_gb);
6562 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6564 /* the absolute number of hosts currently active is shown */
6565 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6568 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6573 struct sdeb_store_info *sip;
6574 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6577 if (sscanf(buf, "%d", &delta_hosts) != 1)
6579 if (delta_hosts > 0) {
6583 xa_for_each_marked(per_store_ap, idx, sip,
6584 SDEB_XA_NOT_IN_USE) {
6585 sdeb_most_recent_idx = (int)idx;
6589 if (found) /* re-use case */
6590 sdebug_add_host_helper((int)idx);
6592 sdebug_do_add_host(true);
6594 sdebug_do_add_host(false);
6596 } while (--delta_hosts);
6597 } else if (delta_hosts < 0) {
6599 sdebug_do_remove_host(false);
6600 } while (++delta_hosts);
6604 static DRIVER_ATTR_RW(add_host);
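/* A usage sketch: 'echo 2 > add_host' (in the sysfs directory noted below)
 * adds two more simulated hosts, while 'echo -1 > add_host' removes the most
 * recently added one, per the delta_hosts handling above. */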
6606 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6608 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6610 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6615 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6616 sdebug_vpd_use_hostno = n;
6621 static DRIVER_ATTR_RW(vpd_use_hostno);
6623 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6625 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6627 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6632 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6634 sdebug_statistics = true;
6636 clear_queue_stats();
6637 sdebug_statistics = false;
6643 static DRIVER_ATTR_RW(statistics);
6645 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6647 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6649 static DRIVER_ATTR_RO(sector_size);
6651 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6653 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6655 static DRIVER_ATTR_RO(submit_queues);
6657 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6659 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6661 static DRIVER_ATTR_RO(dix);
6663 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6665 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6667 static DRIVER_ATTR_RO(dif);
6669 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6671 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6673 static DRIVER_ATTR_RO(guard);
6675 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6677 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6679 static DRIVER_ATTR_RO(ato);
6681 static ssize_t map_show(struct device_driver *ddp, char *buf)
6685 if (!scsi_debug_lbp())
6686 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6687 sdebug_store_sectors);
6689 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6690 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6693 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6694 (int)map_size, sip->map_storep);
6696 buf[count++] = '\n';
6701 static DRIVER_ATTR_RO(map);
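/* The map attribute above reports mapped (provisioned) ranges of the store as
 * a bitmap range list in provisioning-block units (illustratively something
 * like "0-1,64-127"); with logical block provisioning off it simply reports
 * the whole store as mapped. */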
6703 static ssize_t random_show(struct device_driver *ddp, char *buf)
6705 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6708 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6713 if (kstrtobool(buf, &v))
6719 static DRIVER_ATTR_RW(random);
6721 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6723 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6725 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6730 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6731 sdebug_removable = (n > 0);
6736 static DRIVER_ATTR_RW(removable);
6738 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6740 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6742 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6743 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6748 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6749 sdebug_host_lock = (n > 0);
6754 static DRIVER_ATTR_RW(host_lock);
6756 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6758 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6760 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6765 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6766 sdebug_strict = (n > 0);
6771 static DRIVER_ATTR_RW(strict);
6773 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6775 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6777 static DRIVER_ATTR_RO(uuid_ctl);
6779 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6781 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6783 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6788 ret = kstrtoint(buf, 0, &n);
6792 all_config_cdb_len();
6795 static DRIVER_ATTR_RW(cdb_len);
6797 static const char * const zbc_model_strs_a[] = {
6798 [BLK_ZONED_NONE] = "none",
6799 [BLK_ZONED_HA] = "host-aware",
6800 [BLK_ZONED_HM] = "host-managed",
6803 static const char * const zbc_model_strs_b[] = {
6804 [BLK_ZONED_NONE] = "no",
6805 [BLK_ZONED_HA] = "aware",
6806 [BLK_ZONED_HM] = "managed",
6809 static const char * const zbc_model_strs_c[] = {
6810 [BLK_ZONED_NONE] = "0",
6811 [BLK_ZONED_HA] = "1",
6812 [BLK_ZONED_HM] = "2",
6815 static int sdeb_zbc_model_str(const char *cp)
6817 int res = sysfs_match_string(zbc_model_strs_a, cp);
6820 res = sysfs_match_string(zbc_model_strs_b, cp);
6822 res = sysfs_match_string(zbc_model_strs_c, cp);
6830 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6832 return scnprintf(buf, PAGE_SIZE, "%s\n",
6833 zbc_model_strs_a[sdeb_zbc_model]);
6835 static DRIVER_ATTR_RO(zbc);
6837 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6839 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6841 static DRIVER_ATTR_RO(tur_ms_to_ready);
6843 /* Note: The following array creates attribute files in the
6844 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6845 files (over those found in the /sys/module/scsi_debug/parameters
6846 directory) is that auxiliary actions can be triggered when an attribute
6847 is changed. For example, see add_host_store() above.
6850 static struct attribute *sdebug_drv_attrs[] = {
6851 &driver_attr_delay.attr,
6852 &driver_attr_opts.attr,
6853 &driver_attr_ptype.attr,
6854 &driver_attr_dsense.attr,
6855 &driver_attr_fake_rw.attr,
6856 &driver_attr_host_max_queue.attr,
6857 &driver_attr_no_lun_0.attr,
6858 &driver_attr_num_tgts.attr,
6859 &driver_attr_dev_size_mb.attr,
6860 &driver_attr_num_parts.attr,
6861 &driver_attr_every_nth.attr,
6862 &driver_attr_lun_format.attr,
6863 &driver_attr_max_luns.attr,
6864 &driver_attr_max_queue.attr,
6865 &driver_attr_no_rwlock.attr,
6866 &driver_attr_no_uld.attr,
6867 &driver_attr_scsi_level.attr,
6868 &driver_attr_virtual_gb.attr,
6869 &driver_attr_add_host.attr,
6870 &driver_attr_per_host_store.attr,
6871 &driver_attr_vpd_use_hostno.attr,
6872 &driver_attr_sector_size.attr,
6873 &driver_attr_statistics.attr,
6874 &driver_attr_submit_queues.attr,
6875 &driver_attr_dix.attr,
6876 &driver_attr_dif.attr,
6877 &driver_attr_guard.attr,
6878 &driver_attr_ato.attr,
6879 &driver_attr_map.attr,
6880 &driver_attr_random.attr,
6881 &driver_attr_removable.attr,
6882 &driver_attr_host_lock.attr,
6883 &driver_attr_ndelay.attr,
6884 &driver_attr_strict.attr,
6885 &driver_attr_uuid_ctl.attr,
6886 &driver_attr_cdb_len.attr,
6887 &driver_attr_tur_ms_to_ready.attr,
6888 &driver_attr_zbc.attr,
6891 ATTRIBUTE_GROUPS(sdebug_drv);
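/* ATTRIBUTE_GROUPS() generates sdebug_drv_groups, which is hooked up as
 * pseudo_lld_bus.drv_groups near the end of this file so that the attributes
 * above show up for the driver on that bus. */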
6893 static struct device *pseudo_primary;
6895 static int __init scsi_debug_init(void)
6897 bool want_store = (sdebug_fake_rw == 0);
6899 int k, ret, hosts_to_add;
6902 ramdisk_lck_a[0] = &atomic_rw;
6903 ramdisk_lck_a[1] = &atomic_rw2;
6904 atomic_set(&retired_max_queue, 0);
6906 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6907 pr_warn("ndelay must be less than 1 second, ignored\n");
6909 } else if (sdebug_ndelay > 0)
6910 sdebug_jdelay = JDELAY_OVERRIDDEN;
6912 switch (sdebug_sector_size) {
6919 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6923 switch (sdebug_dif) {
6924 case T10_PI_TYPE0_PROTECTION:
6926 case T10_PI_TYPE1_PROTECTION:
6927 case T10_PI_TYPE2_PROTECTION:
6928 case T10_PI_TYPE3_PROTECTION:
6929 have_dif_prot = true;
6933 pr_err("dif must be 0, 1, 2 or 3\n");
6937 if (sdebug_num_tgts < 0) {
6938 pr_err("num_tgts must be >= 0\n");
6942 if (sdebug_guard > 1) {
6943 pr_err("guard must be 0 or 1\n");
6947 if (sdebug_ato > 1) {
6948 pr_err("ato must be 0 or 1\n");
6952 if (sdebug_physblk_exp > 15) {
6953 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6957 sdebug_lun_am = sdebug_lun_am_i;
6958 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6959 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6960 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6963 if (sdebug_max_luns > 256) {
6964 if (sdebug_max_luns > 16384) {
6965 pr_warn("max_luns can be no more than 16384, use default\n");
6966 sdebug_max_luns = DEF_MAX_LUNS;
6968 sdebug_lun_am = SAM_LUN_AM_FLAT;
6971 if (sdebug_lowest_aligned > 0x3fff) {
6972 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6976 if (submit_queues < 1) {
6977 pr_err("submit_queues must be 1 or more\n");
6981 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6982 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6986 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6987 (sdebug_host_max_queue < 0)) {
6988 pr_err("host_max_queue must be in range [0 %d]\n",
6993 if (sdebug_host_max_queue &&
6994 (sdebug_max_queue != sdebug_host_max_queue)) {
6995 sdebug_max_queue = sdebug_host_max_queue;
6996 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7000 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
7002 if (sdebug_q_arr == NULL)
7004 for (k = 0; k < submit_queues; ++k)
7005 spin_lock_init(&sdebug_q_arr[k].qc_lock);
7008 * check for host managed zoned block device specified with
7009 * ptype=0x14 or zbc=XXX.
7011 if (sdebug_ptype == TYPE_ZBC) {
7012 sdeb_zbc_model = BLK_ZONED_HM;
7013 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7014 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7020 switch (sdeb_zbc_model) {
7021 case BLK_ZONED_NONE:
7023 sdebug_ptype = TYPE_DISK;
7026 sdebug_ptype = TYPE_ZBC;
7029 pr_err("Invalid ZBC model\n");
7034 if (sdeb_zbc_model != BLK_ZONED_NONE) {
7035 sdeb_zbc_in_use = true;
7036 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7037 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7040 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7041 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7042 if (sdebug_dev_size_mb < 1)
7043 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
7044 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7045 sdebug_store_sectors = sz / sdebug_sector_size;
7046 sdebug_capacity = get_sdebug_capacity();
7048 /* play around with geometry, don't waste too much on track 0 */
7050 sdebug_sectors_per = 32;
7051 if (sdebug_dev_size_mb >= 256)
7053 else if (sdebug_dev_size_mb >= 16)
7055 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7056 (sdebug_sectors_per * sdebug_heads);
7057 if (sdebug_cylinders_per >= 1024) {
7058 /* other LLDs do this; implies >= 1GB ram disk ... */
7060 sdebug_sectors_per = 63;
7061 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7062 (sdebug_sectors_per * sdebug_heads);
7064 if (scsi_debug_lbp()) {
7065 sdebug_unmap_max_blocks =
7066 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7068 sdebug_unmap_max_desc =
7069 clamp(sdebug_unmap_max_desc, 0U, 256U);
7071 sdebug_unmap_granularity =
7072 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7074 if (sdebug_unmap_alignment &&
7075 sdebug_unmap_granularity <=
7076 sdebug_unmap_alignment) {
7077 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7082 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7084 idx = sdebug_add_store();
7091 pseudo_primary = root_device_register("pseudo_0");
7092 if (IS_ERR(pseudo_primary)) {
7093 pr_warn("root_device_register() error\n");
7094 ret = PTR_ERR(pseudo_primary);
7097 ret = bus_register(&pseudo_lld_bus);
7099 pr_warn("bus_register error: %d\n", ret);
7102 ret = driver_register(&sdebug_driverfs_driver);
7104 pr_warn("driver_register error: %d\n", ret);
7108 hosts_to_add = sdebug_add_host;
7109 sdebug_add_host = 0;
7111 for (k = 0; k < hosts_to_add; k++) {
7112 if (want_store && k == 0) {
7113 ret = sdebug_add_host_helper(idx);
7115 pr_err("add_host_helper k=%d, error=%d\n",
7120 ret = sdebug_do_add_host(want_store &&
7121 sdebug_per_host_store);
7123 pr_err("add_host k=%d error=%d\n", k, -ret);
7129 pr_info("built %d host(s)\n", sdebug_num_hosts);
7134 bus_unregister(&pseudo_lld_bus);
7136 root_device_unregister(pseudo_primary);
7138 sdebug_erase_store(idx, NULL);
7140 kfree(sdebug_q_arr);
7144 static void __exit scsi_debug_exit(void)
7146 int k = sdebug_num_hosts;
7150 sdebug_do_remove_host(true);
7152 driver_unregister(&sdebug_driverfs_driver);
7153 bus_unregister(&pseudo_lld_bus);
7154 root_device_unregister(pseudo_primary);
7156 sdebug_erase_all_stores(false);
7157 xa_destroy(per_store_ap);
7158 kfree(sdebug_q_arr);
7161 device_initcall(scsi_debug_init);
7162 module_exit(scsi_debug_exit);
7164 static void sdebug_release_adapter(struct device *dev)
7166 struct sdebug_host_info *sdbg_host;
7168 sdbg_host = to_sdebug_host(dev);
7172 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7173 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7178 if (xa_empty(per_store_ap))
7180 sip = xa_load(per_store_ap, idx);
7184 vfree(sip->map_storep);
7185 vfree(sip->dif_storep);
7187 xa_erase(per_store_ap, idx);
7191 /* Assume apart_from_first==false only in shutdown case. */
7192 static void sdebug_erase_all_stores(bool apart_from_first)
7195 struct sdeb_store_info *sip = NULL;
7197 xa_for_each(per_store_ap, idx, sip) {
7198 if (apart_from_first)
7199 apart_from_first = false;
7201 sdebug_erase_store(idx, sip);
7203 if (apart_from_first)
7204 sdeb_most_recent_idx = sdeb_first_idx;
7208 * Returns the new store's xarray element index (idx) if >= 0, else a negated errno.
7209 * Limit the number of stores to 65536.
7211 static int sdebug_add_store(void)
7215 unsigned long iflags;
7216 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7217 struct sdeb_store_info *sip = NULL;
7218 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7220 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7224 xa_lock_irqsave(per_store_ap, iflags);
7225 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7226 if (unlikely(res < 0)) {
7227 xa_unlock_irqrestore(per_store_ap, iflags);
7229 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7232 sdeb_most_recent_idx = n_idx;
7233 if (sdeb_first_idx < 0)
7234 sdeb_first_idx = n_idx;
7235 xa_unlock_irqrestore(per_store_ap, iflags);
7238 sip->storep = vzalloc(sz);
7240 pr_err("user data oom\n");
7243 if (sdebug_num_parts > 0)
7244 sdebug_build_parts(sip->storep, sz);
7246 /* DIF/DIX: what T10 calls Protection Information (PI) */
7250 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7251 sip->dif_storep = vmalloc(dif_size);
7253 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7256 if (!sip->dif_storep) {
7257 pr_err("DIX oom\n");
7260 memset(sip->dif_storep, 0xff, dif_size);
7262 /* Logical Block Provisioning */
7263 if (scsi_debug_lbp()) {
7264 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7265 sip->map_storep = vmalloc(array_size(sizeof(long),
7266 BITS_TO_LONGS(map_size)));
7268 pr_info("%lu provisioning blocks\n", map_size);
7270 if (!sip->map_storep) {
7271 pr_err("LBP map oom\n");
7275 bitmap_zero(sip->map_storep, map_size);
7277 /* Map first 1KB for partition table */
7278 if (sdebug_num_parts)
7279 map_region(sip, 0, 2);
7282 rwlock_init(&sip->macc_lck);
7285 sdebug_erase_store((int)n_idx, sip);
7286 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7290 static int sdebug_add_host_helper(int per_host_idx)
7292 int k, devs_per_host, idx;
7293 int error = -ENOMEM;
7294 struct sdebug_host_info *sdbg_host;
7295 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7297 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7300 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7301 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7302 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7303 sdbg_host->si_idx = idx;
7305 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7307 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7308 for (k = 0; k < devs_per_host; k++) {
7309 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7314 spin_lock(&sdebug_host_list_lock);
7315 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7316 spin_unlock(&sdebug_host_list_lock);
7318 sdbg_host->dev.bus = &pseudo_lld_bus;
7319 sdbg_host->dev.parent = pseudo_primary;
7320 sdbg_host->dev.release = &sdebug_release_adapter;
7321 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7323 error = device_register(&sdbg_host->dev);
7325 spin_lock(&sdebug_host_list_lock);
7326 list_del(&sdbg_host->host_list);
7327 spin_unlock(&sdebug_host_list_lock);
7335 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7337 list_del(&sdbg_devinfo->dev_list);
7338 kfree(sdbg_devinfo->zstate);
7339 kfree(sdbg_devinfo);
7341 if (sdbg_host->dev.release)
7342 put_device(&sdbg_host->dev);
7345 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7349 static int sdebug_do_add_host(bool mk_new_store)
7351 int ph_idx = sdeb_most_recent_idx;
7354 ph_idx = sdebug_add_store();
7358 return sdebug_add_host_helper(ph_idx);
7361 static void sdebug_do_remove_host(bool the_end)
7364 struct sdebug_host_info *sdbg_host = NULL;
7365 struct sdebug_host_info *sdbg_host2;
7367 spin_lock(&sdebug_host_list_lock);
7368 if (!list_empty(&sdebug_host_list)) {
7369 sdbg_host = list_entry(sdebug_host_list.prev,
7370 struct sdebug_host_info, host_list);
7371 idx = sdbg_host->si_idx;
7373 if (!the_end && idx >= 0) {
7376 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7377 if (sdbg_host2 == sdbg_host)
7379 if (idx == sdbg_host2->si_idx) {
7385 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7386 if (idx == sdeb_most_recent_idx)
7387 --sdeb_most_recent_idx;
7391 list_del(&sdbg_host->host_list);
7392 spin_unlock(&sdebug_host_list_lock);
7397 device_unregister(&sdbg_host->dev);
7401 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7404 struct sdebug_dev_info *devip;
7406 block_unblock_all_queues(true);
7407 devip = (struct sdebug_dev_info *)sdev->hostdata;
7408 if (NULL == devip) {
7409 block_unblock_all_queues(false);
7412 num_in_q = atomic_read(&devip->num_in_q);
7414 if (qdepth > SDEBUG_CANQUEUE) {
7415 qdepth = SDEBUG_CANQUEUE;
7416 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7417 qdepth, SDEBUG_CANQUEUE);
7421 if (qdepth != sdev->queue_depth)
7422 scsi_change_queue_depth(sdev, qdepth);
7424 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7425 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7426 __func__, qdepth, num_in_q);
7428 block_unblock_all_queues(false);
7429 return sdev->queue_depth;
7432 static bool fake_timeout(struct scsi_cmnd *scp)
7434 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7435 if (sdebug_every_nth < -1)
7436 sdebug_every_nth = -1;
7437 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7438 return true; /* ignore command causing timeout */
7439 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7440 scsi_medium_access_command(scp))
7441 return true; /* time out reads and writes */
7446 /* Response to TUR or media access command when device stopped */
7447 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7451 ktime_t now_ts = ktime_get_boottime();
7452 struct scsi_device *sdp = scp->device;
7454 stopped_state = atomic_read(&devip->stopped);
7455 if (stopped_state == 2) {
7456 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7457 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7458 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7459 /* tur_ms_to_ready timer extinguished */
7460 atomic_set(&devip->stopped, 0);
7464 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7466 sdev_printk(KERN_INFO, sdp,
7467 "%s: Not ready: in process of becoming ready\n", my_name);
7468 if (scp->cmnd[0] == TEST_UNIT_READY) {
7469 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7471 if (diff_ns <= tur_nanosecs_to_ready)
7472 diff_ns = tur_nanosecs_to_ready - diff_ns;
7474 diff_ns = tur_nanosecs_to_ready;
7475 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7476 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7477 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7479 return check_condition_result;
7482 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7484 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7486 return check_condition_result;
7489 static void sdebug_map_queues(struct Scsi_Host *shost)
7493 if (shost->nr_hw_queues == 1)
7496 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7497 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7501 if (i == HCTX_TYPE_DEFAULT)
7502 map->nr_queues = submit_queues - poll_queues;
7503 else if (i == HCTX_TYPE_POLL)
7504 map->nr_queues = poll_queues;
7506 if (!map->nr_queues) {
7507 BUG_ON(i == HCTX_TYPE_DEFAULT);
7511 map->queue_offset = qoff;
7512 blk_mq_map_queues(map);
7514 qoff += map->nr_queues;
7518 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7521 bool retiring = false;
7522 int num_entries = 0;
7523 unsigned int qc_idx = 0;
7524 unsigned long iflags;
7525 ktime_t kt_from_boot = ktime_get_boottime();
7526 struct sdebug_queue *sqp;
7527 struct sdebug_queued_cmd *sqcp;
7528 struct scsi_cmnd *scp;
7529 struct sdebug_dev_info *devip;
7530 struct sdebug_defer *sd_dp;
7532 sqp = sdebug_q_arr + queue_num;
7534 spin_lock_irqsave(&sqp->qc_lock, iflags);
7536 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7537 if (qc_idx >= sdebug_max_queue)
7540 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7543 if (!test_bit(qc_idx, sqp->in_use_bm))
7546 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7548 if (qc_idx >= sdebug_max_queue)
7551 sqcp = &sqp->qc_arr[qc_idx];
7552 sd_dp = sqcp->sd_dp;
7553 if (unlikely(!sd_dp))
7556 if (unlikely(scp == NULL)) {
7557 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7558 queue_num, qc_idx, __func__);
7561 if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7562 if (kt_from_boot < sd_dp->cmpl_ts)
7565 } else /* ignoring non REQ_POLLED requests */
7567 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7569 atomic_dec(&devip->num_in_q);
7571 pr_err("devip=NULL from %s\n", __func__);
7572 if (unlikely(atomic_read(&retired_max_queue) > 0))
7575 sqcp->a_cmnd = NULL;
7576 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7577 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7578 sqp, queue_num, qc_idx, __func__);
7581 if (unlikely(retiring)) { /* user has reduced max_queue */
7584 retval = atomic_read(&retired_max_queue);
7585 if (qc_idx >= retval) {
7586 pr_err("index %d too large\n", retval);
7589 k = find_last_bit(sqp->in_use_bm, retval);
7590 if ((k < sdebug_max_queue) || (k == retval))
7591 atomic_set(&retired_max_queue, 0);
7593 atomic_set(&retired_max_queue, k + 1);
7595 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7596 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7597 scsi_done(scp); /* callback to mid level */
7599 spin_lock_irqsave(&sqp->qc_lock, iflags);
7600 if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7605 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7607 if (num_entries > 0)
7608 atomic_add(num_entries, &sdeb_mq_poll_count);
7612 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7613 struct scsi_cmnd *scp)
7616 struct scsi_device *sdp = scp->device;
7617 const struct opcode_info_t *oip;
7618 const struct opcode_info_t *r_oip;
7619 struct sdebug_dev_info *devip;
7620 u8 *cmd = scp->cmnd;
7621 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7622 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7625 u64 lun_index = sdp->lun & 0x3FFF;
7632 scsi_set_resid(scp, 0);
7633 if (sdebug_statistics) {
7634 atomic_inc(&sdebug_cmnd_count);
7635 inject_now = inject_on_this_cmd();
7639 if (unlikely(sdebug_verbose &&
7640 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7645 sb = (int)sizeof(b);
7647 strcpy(b, "too long, over 32 bytes");
7649 for (k = 0, n = 0; k < len && n < sb; ++k)
7650 n += scnprintf(b + n, sb - n, "%02x ",
7653 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7654 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7656 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7657 return SCSI_MLQUEUE_HOST_BUSY;
7658 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7659 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7662 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7663 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7664 devip = (struct sdebug_dev_info *)sdp->hostdata;
7665 if (unlikely(!devip)) {
7666 devip = find_build_dev_info(sdp);
7670 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7671 atomic_set(&sdeb_inject_pending, 1);
7673 na = oip->num_attached;
7675 if (na) { /* multiple commands with this opcode */
7677 if (FF_SA & r_oip->flags) {
7678 if (F_SA_LOW & oip->flags)
7681 sa = get_unaligned_be16(cmd + 8);
7682 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7683 if (opcode == oip->opcode && sa == oip->sa)
7686 } else { /* since no service action only check opcode */
7687 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7688 if (opcode == oip->opcode)
7693 if (F_SA_LOW & r_oip->flags)
7694 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7695 else if (F_SA_HIGH & r_oip->flags)
7696 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7698 mk_sense_invalid_opcode(scp);
7701 } /* else (when na==0) we assume the oip is a match */
7703 if (unlikely(F_INV_OP & flags)) {
7704 mk_sense_invalid_opcode(scp);
7707 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7709 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7710 my_name, opcode, " supported for wlun");
7711 mk_sense_invalid_opcode(scp);
7714 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7718 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7719 rem = ~oip->len_mask[k] & cmd[k];
7721 for (j = 7; j >= 0; --j, rem <<= 1) {
7725 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7730 if (unlikely(!(F_SKIP_UA & flags) &&
7731 find_first_bit(devip->uas_bm,
7732 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7733 errsts = make_ua(scp, devip);
7737 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7738 atomic_read(&devip->stopped))) {
7739 errsts = resp_not_ready(scp, devip);
7743 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7745 if (unlikely(sdebug_every_nth)) {
7746 if (fake_timeout(scp))
7747 return 0; /* ignore command: make trouble */
7749 if (likely(oip->pfp))
7750 pfp = oip->pfp; /* calls a resp_* function */
7752 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7755 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7756 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7757 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7758 sdebug_ndelay > 10000)) {
7760 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7761 * for Start Stop Unit (SSU) want at least 1 second delay and
7762 * if sdebug_jdelay>1 want a long delay of that many seconds.
7763 * For Synchronize Cache want 1/20 of SSU's delay.
7765 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7766 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7768 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7769 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7771 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7774 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7776 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7779 static struct scsi_host_template sdebug_driver_template = {
7780 .show_info = scsi_debug_show_info,
7781 .write_info = scsi_debug_write_info,
7782 .proc_name = sdebug_proc_name,
7783 .name = "SCSI DEBUG",
7784 .info = scsi_debug_info,
7785 .slave_alloc = scsi_debug_slave_alloc,
7786 .slave_configure = scsi_debug_slave_configure,
7787 .slave_destroy = scsi_debug_slave_destroy,
7788 .ioctl = scsi_debug_ioctl,
7789 .queuecommand = scsi_debug_queuecommand,
7790 .change_queue_depth = sdebug_change_qdepth,
7791 .map_queues = sdebug_map_queues,
7792 .mq_poll = sdebug_blk_mq_poll,
7793 .eh_abort_handler = scsi_debug_abort,
7794 .eh_device_reset_handler = scsi_debug_device_reset,
7795 .eh_target_reset_handler = scsi_debug_target_reset,
7796 .eh_bus_reset_handler = scsi_debug_bus_reset,
7797 .eh_host_reset_handler = scsi_debug_host_reset,
7798 .can_queue = SDEBUG_CANQUEUE,
7800 .sg_tablesize = SG_MAX_SEGMENTS,
7801 .cmd_per_lun = DEF_CMD_PER_LUN,
7803 .max_segment_size = -1U,
7804 .module = THIS_MODULE,
7805 .track_queue_depth = 1,
7808 static int sdebug_driver_probe(struct device *dev)
7811 struct sdebug_host_info *sdbg_host;
7812 struct Scsi_Host *hpnt;
7815 sdbg_host = to_sdebug_host(dev);
7817 sdebug_driver_template.can_queue = sdebug_max_queue;
7818 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7819 if (!sdebug_clustering)
7820 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7822 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7824 pr_err("scsi_host_alloc failed\n");
7828 if (submit_queues > nr_cpu_ids) {
7829 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7830 my_name, submit_queues, nr_cpu_ids);
7831 submit_queues = nr_cpu_ids;
7834 * Decide whether to tell scsi subsystem that we want mq. The
7835 * following should give the same answer for each host.
7837 hpnt->nr_hw_queues = submit_queues;
7838 if (sdebug_host_max_queue)
7839 hpnt->host_tagset = 1;
7841 /* poll queues are possible for nr_hw_queues > 1 */
7842 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7843 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7844 my_name, poll_queues, hpnt->nr_hw_queues);
7849 * Poll queues don't need interrupts, but we need at least one I/O queue
7850 * left over for non-polled I/O.
7851 * If that condition is not met, trim poll_queues to 1 (just for simplicity).
7853 if (poll_queues >= submit_queues) {
7854 if (submit_queues < 3)
7855 pr_warn("%s: trim poll_queues to 1\n", my_name);
7857 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7858 my_name, submit_queues - 1);
7864 sdbg_host->shost = hpnt;
7865 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7866 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7867 hpnt->max_id = sdebug_num_tgts + 1;
7869 hpnt->max_id = sdebug_num_tgts;
7870 /* = sdebug_max_luns; */
7871 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7875 switch (sdebug_dif) {
7877 case T10_PI_TYPE1_PROTECTION:
7878 hprot = SHOST_DIF_TYPE1_PROTECTION;
7880 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7883 case T10_PI_TYPE2_PROTECTION:
7884 hprot = SHOST_DIF_TYPE2_PROTECTION;
7886 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7889 case T10_PI_TYPE3_PROTECTION:
7890 hprot = SHOST_DIF_TYPE3_PROTECTION;
7892 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7897 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7901 scsi_host_set_prot(hpnt, hprot);
7903 if (have_dif_prot || sdebug_dix)
7904 pr_info("host protection%s%s%s%s%s%s%s\n",
7905 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7906 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7907 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7908 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7909 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7910 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7911 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7913 if (sdebug_guard == 1)
7914 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7916 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7918 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7919 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7920 if (sdebug_every_nth) /* need stats counters for every_nth */
7921 sdebug_statistics = true;
7922 error = scsi_add_host(hpnt, &sdbg_host->dev);
7924 pr_err("scsi_add_host failed\n");
7926 scsi_host_put(hpnt);
7928 scsi_scan_host(hpnt);
7934 static void sdebug_driver_remove(struct device *dev)
7936 struct sdebug_host_info *sdbg_host;
7937 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7939 sdbg_host = to_sdebug_host(dev);
7941 scsi_remove_host(sdbg_host->shost);
7943 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7945 list_del(&sdbg_devinfo->dev_list);
7946 kfree(sdbg_devinfo->zstate);
7947 kfree(sdbg_devinfo);
7950 scsi_host_put(sdbg_host->shost);
7953 static int pseudo_lld_bus_match(struct device *dev,
7954 struct device_driver *dev_driver)
7959 static struct bus_type pseudo_lld_bus = {
7961 .match = pseudo_lld_bus_match,
7962 .probe = sdebug_driver_probe,
7963 .remove = sdebug_driver_remove,
7964 .drv_groups = sdebug_drv_groups,