// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
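
/*
 * Illustrative sketch, not part of the driver: because the SDEBUG_UA_*
 * values above are bit positions and lower numbers have higher priority,
 * scanning a UA bitmap with find_first_bit() naturally yields the highest
 * priority pending UA. make_ua() further below relies on this pattern.
 */
#if 0
	unsigned long uas_bm = BIT(SDEBUG_UA_BUS_RESET) |
			       BIT(SDEBUG_UA_LUNS_CHANGED);
	int k = find_first_bit(&uas_bm, SDEBUG_NUM_UAS);
	/* k == SDEBUG_UA_BUS_RESET (2), reported before LUNS_CHANGED (5) */
#endif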
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
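
/*
 * Illustrative sketch, not part of the driver: in_use_bm[] in struct
 * sdebug_queue below spans SDEBUG_CANQUEUE_WORDS longs, so it can track
 * SDEBUG_CANQUEUE (3 * 64 = 192 on 64 bit) queued commands; a free slot
 * is claimed with an ordinary bitmap search:
 */
#if 0
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS] = {};
	int slot = find_first_zero_bit(in_use_bm, SDEBUG_CANQUEUE);

	if (slot < sdebug_max_queue)	/* max_queue may reduce the limit */
		set_bit(slot, in_use_bm);
#endif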
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	sector_t z_start;
	sector_t z_wp;
};
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};
struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	    0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	    0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
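
/*
 * Illustrative sketch, not part of the driver: dispatch maps the first cdb
 * byte through opcode_ind_arr[] to an SDEB_I_* index, which then selects an
 * entry in opcode_info_arr[] (defined further below). The names scp/oip
 * mirror the conventions used elsewhere in this file.
 */
#if 0
	u8 opcode = scp->cmnd[0];		/* e.g. 0x28 -> READ(10) */
	int idx = opcode_ind_arr[opcode];	/* -> SDEB_I_READ */
	const struct opcode_info_t *oip = &opcode_info_arr[idx];
#endif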
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
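
/*
 * Illustrative sketch, not part of the driver: a response function handling
 * a cdb with the IMMED bit set can OR SDEG_RES_IMMED_MASK into its result
 * so that the completion is not delayed. 'immed' and 'res' are hypothetical
 * locals parsed/built by such a handler.
 */
#if 0
	if (immed)
		return res | SDEG_RES_IMMED_MASK;
	return res;
#endif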
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
		   0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
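
/*
 * Illustrative sketch, not part of the driver: logic such as REPORT
 * SUPPORTED OPERATION CODES walks opcode_info_arr[] until it reaches the
 * terminating element (num_attached == 0xff), visiting each entry's
 * overflow array (arrp, holding num_attached elements) along the way.
 */
#if 0
	const struct opcode_info_t *oip;
	int k;

	for (oip = opcode_info_arr; oip->num_attached != 0xff; ++oip) {
		for (k = 0; k < oip->num_attached; ++k) {
			/* oip->arrp[k] holds a less common cdb variant */
		}
	}
#endif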
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;		/* io_uring iopoll interface */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
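
/*
 * Illustrative sketch, not part of the driver: provisioning work (e.g.
 * updating the map_storep provisioning map on UNMAP or WRITE SAME) is
 * gated on this helper so fake_rw runs skip it; map_region() is a
 * hypothetical helper name here.
 */
#if 0
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
#endif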
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
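
/*
 * Illustrative sketch, not part of the driver: a typical caller rejecting
 * an unsupported VPD page flags cdb byte 2 with no specific bit position
 * (in_bit = -1), exactly as resp_inquiry() does further below:
 */
#if 0
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
	return check_condition_result;
#endif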
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
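
/*
 * Illustrative sketch, not part of the driver: since offsets need not be
 * written in ascending order, a handler can assemble its response
 * piecewise; hdr/desc and their lengths are hypothetical buffers. The
 * helper assumes resid was primed to scsi_bufflen() beforehand.
 */
#if 0
	scsi_set_resid(scp, scsi_bufflen(scp));
	p_fill_from_dev_buffer(scp, desc, desc_len, hdr_len); /* tail first */
	p_fill_from_dev_buffer(scp, hdr, hdr_len, 0);
#endif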
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;

	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}
/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	if (devip->zcap < devip->zsize) {
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	}

	return 0x3c;
}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
1580 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1582 unsigned char pq_pdt;
1584 unsigned char *cmd = scp->cmnd;
1587 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1589 alloc_len = get_unaligned_be16(cmd + 3);
1590 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1592 return DID_REQUEUE << 16;
1593 is_disk = (sdebug_ptype == TYPE_DISK);
1594 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1595 is_disk_zbc = (is_disk || is_zbc);
1596 have_wlun = scsi_is_wlun(scp->device->lun);
1598 pq_pdt = TYPE_WLUN; /* present, wlun */
1599 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1600 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1602 pq_pdt = (sdebug_ptype & 0x1f);
1604 if (0x2 & cmd[1]) { /* CMDDT bit set */
1605 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1607 return check_condition_result;
1608 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1609 int lu_id_num, port_group_id, target_dev_id;
1612 int host_no = devip->sdbg_host->shost->host_no;
1614 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1615 (devip->channel & 0x7f);
1616 if (sdebug_vpd_use_hostno == 0)
1618 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1619 (devip->target * 1000) + devip->lun);
1620 target_dev_id = ((host_no + 1) * 2000) +
1621 (devip->target * 1000) - 3;
1622 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1623 if (0 == cmd[2]) { /* supported vital product data pages */
1624 arr[1] = cmd[2]; /*sanity */
1626 arr[n++] = 0x0; /* this page */
1627 arr[n++] = 0x80; /* unit serial number */
1628 arr[n++] = 0x83; /* device identification */
1629 arr[n++] = 0x84; /* software interface ident. */
1630 arr[n++] = 0x85; /* management network addresses */
1631 arr[n++] = 0x86; /* extended inquiry */
1632 arr[n++] = 0x87; /* mode page policy */
1633 arr[n++] = 0x88; /* SCSI ports */
1634 if (is_disk_zbc) { /* SBC or ZBC */
1635 arr[n++] = 0x89; /* ATA information */
1636 arr[n++] = 0xb0; /* Block limits */
1637 arr[n++] = 0xb1; /* Block characteristics */
1639 arr[n++] = 0xb2; /* LB Provisioning */
1641 arr[n++] = 0xb6; /* ZB dev. char. */
1643 arr[3] = n - 4; /* number of supported VPD pages */
1644 } else if (0x80 == cmd[2]) { /* unit serial number */
1645 arr[1] = cmd[2]; /*sanity */
1647 memcpy(&arr[4], lu_id_str, len);
1648 } else if (0x83 == cmd[2]) { /* device identification */
1649 arr[1] = cmd[2]; /*sanity */
1650 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1651 target_dev_id, lu_id_num,
1654 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1655 arr[1] = cmd[2]; /*sanity */
1656 arr[3] = inquiry_vpd_84(&arr[4]);
1657 } else if (0x85 == cmd[2]) { /* Management network addresses */
1658 arr[1] = cmd[2]; /*sanity */
1659 arr[3] = inquiry_vpd_85(&arr[4]);
1660 } else if (0x86 == cmd[2]) { /* extended inquiry */
1661 arr[1] = cmd[2]; /*sanity */
1662 arr[3] = 0x3c; /* number of following entries */
1663 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1664 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1665 else if (have_dif_prot)
1666 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1668 arr[4] = 0x0; /* no protection stuff */
1669 arr[5] = 0x7; /* head of q, ordered + simple q's */
1670 } else if (0x87 == cmd[2]) { /* mode page policy */
1671 arr[1] = cmd[2]; /*sanity */
1672 arr[3] = 0x8; /* number of following entries */
1673 arr[4] = 0x2; /* disconnect-reconnect mp */
1674 arr[6] = 0x80; /* mlus, shared */
1675 arr[8] = 0x18; /* protocol specific lu */
1676 arr[10] = 0x82; /* mlus, per initiator port */
1677 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1678 arr[1] = cmd[2]; /*sanity */
1679 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1680 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1681 arr[1] = cmd[2]; /*sanity */
1682 n = inquiry_vpd_89(&arr[4]);
1683 put_unaligned_be16(n, arr + 2);
1684 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1685 arr[1] = cmd[2]; /*sanity */
1686 arr[3] = inquiry_vpd_b0(&arr[4]);
1687 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1688 arr[1] = cmd[2]; /*sanity */
1689 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1690 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1691 arr[1] = cmd[2]; /*sanity */
1692 arr[3] = inquiry_vpd_b2(&arr[4]);
1693 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1694 arr[1] = cmd[2]; /*sanity */
1695 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1697 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1699 return check_condition_result;
1701 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1702 ret = fill_from_dev_buffer(scp, arr,
1703 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1707 /* drops through here for a standard inquiry */
1708 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1709 arr[2] = sdebug_scsi_level;
1710 arr[3] = 2; /* response_data_format==2 */
1711 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1712 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1713 if (sdebug_vpd_use_hostno == 0)
1714 arr[5] |= 0x10; /* claim: implicit TPGS */
1715 arr[6] = 0x10; /* claim: MultiP */
1716 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1717 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1718 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1719 memcpy(&arr[16], sdebug_inq_product_id, 16);
1720 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1721 /* Use Vendor Specific area to place driver date in ASCII */
1722 memcpy(&arr[36], sdebug_version_date, 8);
1723 /* version descriptors (2 bytes each) follow */
1724 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1725 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1727 if (is_disk) { /* SBC-4 no version claimed */
1728 put_unaligned_be16(0x600, arr + n);
1730 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1731 put_unaligned_be16(0x525, arr + n);
1733 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1734 put_unaligned_be16(0x624, arr + n);
1737 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1738 ret = fill_from_dev_buffer(scp, arr,
1739 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1744 /* See resp_iec_m_pg() for how this data is manipulated */
1745 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1748 static int resp_requests(struct scsi_cmnd *scp,
1749 struct sdebug_dev_info *devip)
1751 unsigned char *cmd = scp->cmnd;
1752 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1753 bool dsense = !!(cmd[1] & 1);
1754 u32 alloc_len = cmd[4];
1756 int stopped_state = atomic_read(&devip->stopped);
1758 memset(arr, 0, sizeof(arr));
1759 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1763 arr[2] = LOGICAL_UNIT_NOT_READY;
1764 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1768 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1769 arr[7] = 0xa; /* 18 byte sense buffer */
1770 arr[12] = LOGICAL_UNIT_NOT_READY;
1771 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1773 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1774 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1777 arr[1] = 0x0; /* NO_SENSE in sense_key */
1778 arr[2] = THRESHOLD_EXCEEDED;
1779 arr[3] = 0xff; /* Failure prediction threshold exceeded (false) */
1783 arr[2] = 0x0; /* NO_SENSE in sense_key */
1784 arr[7] = 0xa; /* 18 byte sense buffer */
1785 arr[12] = THRESHOLD_EXCEEDED;
1786 arr[13] = 0xff; /* Failure prediction threshold exceeded (false) */
1788 } else { /* nothing to report */
1791 memset(arr, 0, len);
1794 memset(arr, 0, len);
1799 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1802 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1804 unsigned char *cmd = scp->cmnd;
1805 int power_cond, want_stop, stopped_state;
1808 power_cond = (cmd[4] & 0xf0) >> 4;
1810 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1811 return check_condition_result;
1813 want_stop = !(cmd[4] & 1);
1814 stopped_state = atomic_read(&devip->stopped);
1815 if (stopped_state == 2) {
1816 ktime_t now_ts = ktime_get_boottime();
1818 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1819 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1821 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1822 /* tur_ms_to_ready timer has expired (1 ms = 1000000 ns) */
1823 atomic_set(&devip->stopped, 0);
1827 if (stopped_state == 2) {
1829 stopped_state = 1; /* dummy up success */
1830 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1831 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1832 return check_condition_result;
1836 changing = (stopped_state != want_stop);
1838 atomic_xchg(&devip->stopped, want_stop);
1839 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1840 return SDEG_RES_IMMED_MASK;
1845 static sector_t get_sdebug_capacity(void)
1847 static const unsigned int gibibyte = 1073741824;
1849 if (sdebug_virtual_gb > 0)
1850 return (sector_t)sdebug_virtual_gb *
1851 (gibibyte / sdebug_sector_size);
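/* e.g. virtual_gb=1 with 512 byte sectors reports 2097152 sectors */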
1853 return sdebug_store_sectors;
1856 #define SDEBUG_READCAP_ARR_SZ 8
1857 static int resp_readcap(struct scsi_cmnd *scp,
1858 struct sdebug_dev_info *devip)
1860 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1863 /* following just in case virtual_gb changed */
1864 sdebug_capacity = get_sdebug_capacity();
1865 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1866 if (sdebug_capacity < 0xffffffff) {
1867 capac = (unsigned int)sdebug_capacity - 1;
1868 put_unaligned_be32(capac, arr + 0);
1870 put_unaligned_be32(0xffffffff, arr + 0);
1871 put_unaligned_be16(sdebug_sector_size, arr + 6);
1872 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1875 #define SDEBUG_READCAP16_ARR_SZ 32
1876 static int resp_readcap16(struct scsi_cmnd *scp,
1877 struct sdebug_dev_info *devip)
1879 unsigned char *cmd = scp->cmnd;
1880 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1883 alloc_len = get_unaligned_be32(cmd + 10);
1884 /* following just in case virtual_gb changed */
1885 sdebug_capacity = get_sdebug_capacity();
1886 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1887 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1888 put_unaligned_be32(sdebug_sector_size, arr + 8);
1889 arr[13] = sdebug_physblk_exp & 0xf;
1890 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1892 if (scsi_debug_lbp()) {
1893 arr[14] |= 0x80; /* LBPME */
1894 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1895 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1896 * in the wider field maps to 0 in this field.
1898 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1902 arr[15] = sdebug_lowest_aligned & 0xff;
1904 if (have_dif_prot) {
1905 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1906 arr[12] |= 1; /* PROT_EN */
1909 return fill_from_dev_buffer(scp, arr,
1910 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1913 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1915 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1916 struct sdebug_dev_info *devip)
1918 unsigned char *cmd = scp->cmnd;
1920 int host_no = devip->sdbg_host->shost->host_no;
1921 int port_group_a, port_group_b, port_a, port_b;
1925 alen = get_unaligned_be32(cmd + 6);
1926 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1928 return DID_REQUEUE << 16;
1930 * EVPD page 0x88 states we have two ports, one
1931 * real and a fake port with no device connected.
1932 * So we create two port groups with one port each
1933 * and set the group with port B to unavailable.
1935 port_a = 0x1; /* relative port A */
1936 port_b = 0x2; /* relative port B */
1937 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1938 (devip->channel & 0x7f);
1939 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1940 (devip->channel & 0x7f) + 0x80;
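/*
 * e.g. with host_no 0 and channel 0: port group A is 0x100 and port
 * group B is 0x180 (the 0x80 in the low byte marks the fake port's group).
 */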
1943 * The asymmetric access state is cycled according to the host_id.
1946 if (sdebug_vpd_use_hostno == 0) {
1947 arr[n++] = host_no % 3; /* Asymm access state */
1948 arr[n++] = 0x0F; /* claim: all states are supported */
1950 arr[n++] = 0x0; /* Active/Optimized path */
1951 arr[n++] = 0x01; /* only support active/optimized paths */
1953 put_unaligned_be16(port_group_a, arr + n);
1955 arr[n++] = 0; /* Reserved */
1956 arr[n++] = 0; /* Status code */
1957 arr[n++] = 0; /* Vendor unique */
1958 arr[n++] = 0x1; /* One port per group */
1959 arr[n++] = 0; /* Reserved */
1960 arr[n++] = 0; /* Reserved */
1961 put_unaligned_be16(port_a, arr + n);
1963 arr[n++] = 3; /* Port unavailable */
1964 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1965 put_unaligned_be16(port_group_b, arr + n);
1967 arr[n++] = 0; /* Reserved */
1968 arr[n++] = 0; /* Status code */
1969 arr[n++] = 0; /* Vendor unique */
1970 arr[n++] = 0x1; /* One port per group */
1971 arr[n++] = 0; /* Reserved */
1972 arr[n++] = 0; /* Reserved */
1973 put_unaligned_be16(port_b, arr + n);
1977 put_unaligned_be32(rlen, arr + 0);
1980 * Return the smallest of:
1981 * - The allocated length
1982 * - The constructed command length
1983 * - The maximum array size
1985 rlen = min(alen, n);
1986 ret = fill_from_dev_buffer(scp, arr,
1987 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1992 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1993 struct sdebug_dev_info *devip)
1996 u8 reporting_opts, req_opcode, sdeb_i, supp;
1998 u32 alloc_len, a_len;
1999 int k, offset, len, errsts, count, bump, na;
2000 const struct opcode_info_t *oip;
2001 const struct opcode_info_t *r_oip;
2003 u8 *cmd = scp->cmnd;
2005 rctd = !!(cmd[2] & 0x80);
2006 reporting_opts = cmd[2] & 0x7;
2007 req_opcode = cmd[3];
2008 req_sa = get_unaligned_be16(cmd + 4);
2009 alloc_len = get_unaligned_be32(cmd + 6);
2010 if (alloc_len < 4 || alloc_len > 0xffff) {
2011 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2012 return check_condition_result;
2014 if (alloc_len > 8192)
2018 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2020 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2022 return check_condition_result;
2024 switch (reporting_opts) {
2025 case 0: /* all commands */
2026 /* count number of commands */
2027 for (count = 0, oip = opcode_info_arr;
2028 oip->num_attached != 0xff; ++oip) {
2029 if (F_INV_OP & oip->flags)
2031 count += (oip->num_attached + 1);
2033 bump = rctd ? 20 : 8;
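/*
 * Each command descriptor is 8 bytes. When RCTD is set, a command
 * timeouts descriptor (2 byte length field of 0xa plus 10 bytes of
 * data, written below) follows each one, giving 20 bytes per command.
 */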
2034 put_unaligned_be32(count * bump, arr);
2035 for (offset = 4, oip = opcode_info_arr;
2036 oip->num_attached != 0xff && offset < a_len; ++oip) {
2037 if (F_INV_OP & oip->flags)
2039 na = oip->num_attached;
2040 arr[offset] = oip->opcode;
2041 put_unaligned_be16(oip->sa, arr + offset + 2);
2043 arr[offset + 5] |= 0x2;
2044 if (FF_SA & oip->flags)
2045 arr[offset + 5] |= 0x1;
2046 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2048 put_unaligned_be16(0xa, arr + offset + 8);
2050 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2051 if (F_INV_OP & oip->flags)
2054 arr[offset] = oip->opcode;
2055 put_unaligned_be16(oip->sa, arr + offset + 2);
2057 arr[offset + 5] |= 0x2;
2058 if (FF_SA & oip->flags)
2059 arr[offset + 5] |= 0x1;
2060 put_unaligned_be16(oip->len_mask[0],
2063 put_unaligned_be16(0xa,
2070 case 1: /* one command: opcode only */
2071 case 2: /* one command: opcode plus service action */
2072 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2073 sdeb_i = opcode_ind_arr[req_opcode];
2074 oip = &opcode_info_arr[sdeb_i];
2075 if (F_INV_OP & oip->flags) {
2079 if (1 == reporting_opts) {
2080 if (FF_SA & oip->flags) {
2081 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2084 return check_condition_result;
2087 } else if (2 == reporting_opts &&
2088 0 == (FF_SA & oip->flags)) {
2089 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2090 kfree(arr);
2091 return check_condition_result;
2093 if (0 == (FF_SA & oip->flags) &&
2094 req_opcode == oip->opcode)
2096 else if (0 == (FF_SA & oip->flags)) {
2097 na = oip->num_attached;
2098 for (k = 0, oip = oip->arrp; k < na;
2100 if (req_opcode == oip->opcode)
2103 supp = (k >= na) ? 1 : 3;
2104 } else if (req_sa != oip->sa) {
2105 na = oip->num_attached;
2106 for (k = 0, oip = oip->arrp; k < na;
2108 if (req_sa == oip->sa)
2111 supp = (k >= na) ? 1 : 3;
2115 u = oip->len_mask[0];
2116 put_unaligned_be16(u, arr + 2);
2117 arr[4] = oip->opcode;
2118 for (k = 1; k < u; ++k)
2119 arr[4 + k] = (k < 16) ?
2120 oip->len_mask[k] : 0xff;
2125 arr[1] = (rctd ? 0x80 : 0) | supp;
2127 put_unaligned_be16(0xa, arr + offset);
2132 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2134 return check_condition_result;
2136 offset = (offset < a_len) ? offset : a_len;
2137 len = (offset < alloc_len) ? offset : alloc_len;
2138 errsts = fill_from_dev_buffer(scp, arr, len);
2143 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2144 struct sdebug_dev_info *devip)
2149 u8 *cmd = scp->cmnd;
2151 memset(arr, 0, sizeof(arr));
2152 repd = !!(cmd[2] & 0x80);
2153 alloc_len = get_unaligned_be32(cmd + 6);
2154 if (alloc_len < 4) {
2155 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2156 return check_condition_result;
2158 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2159 arr[1] = 0x1; /* ITNRS */
2166 len = (len < alloc_len) ? len : alloc_len;
2167 return fill_from_dev_buffer(scp, arr, len);
2170 /* <<Following mode page info copied from ST318451LW>> */
2172 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2173 { /* Read-Write Error Recovery page for mode_sense */
2174 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2177 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2179 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2180 return sizeof(err_recov_pg);
2183 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2184 { /* Disconnect-Reconnect page for mode_sense */
2185 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2186 0, 0, 0, 0, 0, 0, 0, 0};
2188 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2190 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2191 return sizeof(disconnect_pg);
2194 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2195 { /* Format device page for mode_sense */
2196 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2197 0, 0, 0, 0, 0, 0, 0, 0,
2198 0, 0, 0, 0, 0x40, 0, 0, 0};
2200 memcpy(p, format_pg, sizeof(format_pg));
2201 put_unaligned_be16(sdebug_sectors_per, p + 10);
2202 put_unaligned_be16(sdebug_sector_size, p + 12);
2203 if (sdebug_removable)
2204 p[20] |= 0x20; /* should agree with INQUIRY */
2206 memset(p + 2, 0, sizeof(format_pg) - 2);
2207 return sizeof(format_pg);
2210 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2211 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2214 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2215 { /* Caching page for mode_sense */
2216 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2217 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2218 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2219 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2221 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2222 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2223 memcpy(p, caching_pg, sizeof(caching_pg));
2225 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2226 else if (2 == pcontrol)
2227 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2228 return sizeof(caching_pg);
2231 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2234 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2235 { /* Control mode page for mode_sense */
2236 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2238 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2242 ctrl_m_pg[2] |= 0x4;
2244 ctrl_m_pg[2] &= ~0x4;
2247 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2249 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2251 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2252 else if (2 == pcontrol)
2253 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2254 return sizeof(ctrl_m_pg);
2258 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2259 { /* Informational Exceptions control mode page for mode_sense */
2260 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2262 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2265 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2267 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2268 else if (2 == pcontrol)
2269 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2270 return sizeof(iec_m_pg);
2273 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2274 { /* SAS SSP mode page - short format for mode_sense */
2275 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2276 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2278 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2280 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2281 return sizeof(sas_sf_m_pg);
2285 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2287 { /* SAS phy control and discover mode page for mode_sense */
2288 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2289 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2290 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2291 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2292 0x2, 0, 0, 0, 0, 0, 0, 0,
2293 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2294 0, 0, 0, 0, 0, 0, 0, 0,
2295 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2296 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2297 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2298 0x3, 0, 0, 0, 0, 0, 0, 0,
2299 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2300 0, 0, 0, 0, 0, 0, 0, 0,
2304 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2305 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2306 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2307 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2308 port_a = target_dev_id + 1;
2309 port_b = port_a + 1;
2310 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2311 put_unaligned_be32(port_a, p + 20);
2312 put_unaligned_be32(port_b, p + 48 + 20);
2314 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2315 return sizeof(sas_pcd_m_pg);
2318 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2319 { /* SAS SSP shared protocol specific port mode subpage */
2320 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2321 0, 0, 0, 0, 0, 0, 0, 0,
2324 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2326 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2327 return sizeof(sas_sha_m_pg);
2330 #define SDEBUG_MAX_MSENSE_SZ 256
2332 static int resp_mode_sense(struct scsi_cmnd *scp,
2333 struct sdebug_dev_info *devip)
2335 int pcontrol, pcode, subpcode, bd_len;
2336 unsigned char dev_spec;
2337 u32 alloc_len, offset, len;
2339 int target = scp->device->id;
2341 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2342 unsigned char *cmd = scp->cmnd;
2343 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2345 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2346 pcontrol = (cmd[2] & 0xc0) >> 6;
2347 pcode = cmd[2] & 0x3f;
2349 msense_6 = (MODE_SENSE == cmd[0]);
2350 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2351 is_disk = (sdebug_ptype == TYPE_DISK);
2352 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2353 if ((is_disk || is_zbc) && !dbd)
2354 bd_len = llbaa ? 16 : 8;
2357 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2358 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2359 if (0x3 == pcontrol) { /* Saving values not supported */
2360 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2361 return check_condition_result;
2363 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2364 (devip->target * 1000) - 3;
2365 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2366 if (is_disk || is_zbc) {
2367 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2379 arr[4] = 0x1; /* set LONGLBA bit */
2380 arr[7] = bd_len; /* assume 255 or less */
2384 if ((bd_len > 0) && (!sdebug_capacity))
2385 sdebug_capacity = get_sdebug_capacity();
2388 if (sdebug_capacity > 0xfffffffe)
2389 put_unaligned_be32(0xffffffff, ap + 0);
2391 put_unaligned_be32(sdebug_capacity, ap + 0);
2392 put_unaligned_be16(sdebug_sector_size, ap + 6);
2395 } else if (16 == bd_len) {
2396 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2397 put_unaligned_be32(sdebug_sector_size, ap + 12);
2402 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2403 /* TODO: Control Extension page */
2404 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2405 return check_condition_result;
2410 case 0x1: /* Read-Write error recovery page, direct access */
2411 len = resp_err_recov_pg(ap, pcontrol, target);
2414 case 0x2: /* Disconnect-Reconnect page, all devices */
2415 len = resp_disconnect_pg(ap, pcontrol, target);
2418 case 0x3: /* Format device page, direct access */
2420 len = resp_format_pg(ap, pcontrol, target);
2425 case 0x8: /* Caching page, direct access */
2426 if (is_disk || is_zbc) {
2427 len = resp_caching_pg(ap, pcontrol, target);
2432 case 0xa: /* Control Mode page, all devices */
2433 len = resp_ctrl_m_pg(ap, pcontrol, target);
2436 case 0x19: /* if spc==1 then sas phy, control+discover */
2437 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2438 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2439 return check_condition_result;
2442 if ((0x0 == subpcode) || (0xff == subpcode))
2443 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2444 if ((0x1 == subpcode) || (0xff == subpcode))
2445 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2447 if ((0x2 == subpcode) || (0xff == subpcode))
2448 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2451 case 0x1c: /* Informational Exceptions Mode page, all devices */
2452 len = resp_iec_m_pg(ap, pcontrol, target);
2455 case 0x3f: /* Read all Mode pages */
2456 if ((0 == subpcode) || (0xff == subpcode)) {
2457 len = resp_err_recov_pg(ap, pcontrol, target);
2458 len += resp_disconnect_pg(ap + len, pcontrol, target);
2460 len += resp_format_pg(ap + len, pcontrol,
2462 len += resp_caching_pg(ap + len, pcontrol,
2464 } else if (is_zbc) {
2465 len += resp_caching_pg(ap + len, pcontrol,
2468 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2469 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2470 if (0xff == subpcode) {
2471 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2472 target, target_dev_id);
2473 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2475 len += resp_iec_m_pg(ap + len, pcontrol, target);
2478 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2479 return check_condition_result;
2487 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2488 return check_condition_result;
2491 arr[0] = offset - 1;
2493 put_unaligned_be16((offset - 2), arr + 0);
2494 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2497 #define SDEBUG_MAX_MSELECT_SZ 512
2499 static int resp_mode_select(struct scsi_cmnd *scp,
2500 struct sdebug_dev_info *devip)
2502 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2503 int param_len, res, mpage;
2504 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2505 unsigned char *cmd = scp->cmnd;
2506 int mselect6 = (MODE_SELECT == cmd[0]);
2508 memset(arr, 0, sizeof(arr));
2511 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2512 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2513 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2514 return check_condition_result;
2516 res = fetch_to_dev_buffer(scp, arr, param_len);
2518 return DID_ERROR << 16;
2519 else if (sdebug_verbose && (res < param_len))
2520 sdev_printk(KERN_INFO, scp->device,
2521 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2522 __func__, param_len, res);
2523 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2524 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2525 off = bd_len + (mselect6 ? 4 : 8);
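/*
 * The mode parameter header is 4 bytes for MODE SELECT(6) and 8 bytes
 * for MODE SELECT(10), so the first mode page starts after that header
 * plus any block descriptors.
 */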
2526 if (md_len > 2 || off >= res) {
2527 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2528 return check_condition_result;
2530 mpage = arr[off] & 0x3f;
2531 ps = !!(arr[off] & 0x80);
2533 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2534 return check_condition_result;
2536 spf = !!(arr[off] & 0x40);
2537 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) : (arr[off + 1] + 2);
2539 if ((pg_len + off) > param_len) {
2540 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2541 PARAMETER_LIST_LENGTH_ERR, 0);
2542 return check_condition_result;
2545 case 0x8: /* Caching Mode page */
2546 if (caching_pg[1] == arr[off + 1]) {
2547 memcpy(caching_pg + 2, arr + off + 2,
2548 sizeof(caching_pg) - 2);
2549 goto set_mode_changed_ua;
2552 case 0xa: /* Control Mode page */
2553 if (ctrl_m_pg[1] == arr[off + 1]) {
2554 memcpy(ctrl_m_pg + 2, arr + off + 2,
2555 sizeof(ctrl_m_pg) - 2);
2556 if (ctrl_m_pg[4] & 0x8)
2560 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2561 goto set_mode_changed_ua;
2564 case 0x1c: /* Informational Exceptions Mode page */
2565 if (iec_m_pg[1] == arr[off + 1]) {
2566 memcpy(iec_m_pg + 2, arr + off + 2,
2567 sizeof(iec_m_pg) - 2);
2568 goto set_mode_changed_ua;
2574 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2575 return check_condition_result;
2576 set_mode_changed_ua:
2577 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2581 static int resp_temp_l_pg(unsigned char *arr)
2583 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2584 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2587 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2588 return sizeof(temp_l_pg);
2591 static int resp_ie_l_pg(unsigned char *arr)
2593 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2596 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2597 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2598 arr[4] = THRESHOLD_EXCEEDED;
2601 return sizeof(ie_l_pg);
2604 static int resp_env_rep_l_spg(unsigned char *arr)
2606 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2607 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2608 0x1, 0x0, 0x23, 0x8,
2609 0x0, 55, 72, 35, 55, 45, 0, 0,
2612 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2613 return sizeof(env_rep_l_spg);
2616 #define SDEBUG_MAX_LSENSE_SZ 512
2618 static int resp_log_sense(struct scsi_cmnd *scp,
2619 struct sdebug_dev_info *devip)
2621 int ppc, sp, pcode, subpcode;
2622 u32 alloc_len, len, n;
2623 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2624 unsigned char *cmd = scp->cmnd;
2626 memset(arr, 0, sizeof(arr));
2630 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2631 return check_condition_result;
2633 pcode = cmd[2] & 0x3f;
2634 subpcode = cmd[3] & 0xff;
2635 alloc_len = get_unaligned_be16(cmd + 7);
2637 if (0 == subpcode) {
2639 case 0x0: /* Supported log pages log page */
2641 arr[n++] = 0x0; /* this page */
2642 arr[n++] = 0xd; /* Temperature */
2643 arr[n++] = 0x2f; /* Informational exceptions */
2646 case 0xd: /* Temperature log page */
2647 arr[3] = resp_temp_l_pg(arr + 4);
2649 case 0x2f: /* Informational exceptions log page */
2650 arr[3] = resp_ie_l_pg(arr + 4);
2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654 return check_condition_result;
2656 } else if (0xff == subpcode) {
2660 case 0x0: /* Supported log pages and subpages log page */
2663 arr[n++] = 0x0; /* 0,0 page */
2665 arr[n++] = 0xff; /* this page */
2667 arr[n++] = 0x0; /* Temperature */
2669 arr[n++] = 0x1; /* Environment reporting */
2671 arr[n++] = 0xff; /* all 0xd subpages */
2673 arr[n++] = 0x0; /* Informational exceptions */
2675 arr[n++] = 0xff; /* all 0x2f subpages */
2678 case 0xd: /* Temperature subpages */
2681 arr[n++] = 0x0; /* Temperature */
2683 arr[n++] = 0x1; /* Environment reporting */
2685 arr[n++] = 0xff; /* these subpages */
2688 case 0x2f: /* Informational exceptions subpages */
2691 arr[n++] = 0x0; /* Informational exceptions */
2693 arr[n++] = 0xff; /* these subpages */
2697 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2698 return check_condition_result;
2700 } else if (subpcode > 0) {
2703 if (pcode == 0xd && subpcode == 1)
2704 arr[3] = resp_env_rep_l_spg(arr + 4);
2706 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2707 return check_condition_result;
2710 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2711 return check_condition_result;
2713 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2714 return fill_from_dev_buffer(scp, arr,
2715 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2718 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2720 return devip->nr_zones != 0;
2723 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2724 unsigned long long lba)
2726 u32 zno = lba >> devip->zsize_shift;
2727 struct sdeb_zone_state *zsp;
2729 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2730 return &devip->zstate[zno];
2733 * If the zone capacity is less than the zone size, adjust for gap
2736 zno = 2 * zno - devip->nr_conv_zones;
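/*
 * Derivation: past the conventional zones, each sequential zone is
 * followed by a gap zone in zstate[], so sequential zone number zno
 * lands at index nr_conv_zones + 2 * (zno - nr_conv_zones), which
 * simplifies to 2 * zno - nr_conv_zones.
 */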
2737 WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2738 zsp = &devip->zstate[zno];
2739 if (lba >= zsp->z_start + zsp->z_size)
2741 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2745 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2747 return zsp->z_type == ZBC_ZTYPE_CNV;
2750 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2752 return zsp->z_type == ZBC_ZTYPE_GAP;
2755 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2757 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2760 static void zbc_close_zone(struct sdebug_dev_info *devip,
2761 struct sdeb_zone_state *zsp)
2763 enum sdebug_z_cond zc;
2765 if (!zbc_zone_is_seq(zsp))
2769 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2772 if (zc == ZC2_IMPLICIT_OPEN)
2773 devip->nr_imp_open--;
2775 devip->nr_exp_open--;
2777 if (zsp->z_wp == zsp->z_start) {
2778 zsp->z_cond = ZC1_EMPTY;
2780 zsp->z_cond = ZC4_CLOSED;
2785 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2787 struct sdeb_zone_state *zsp = &devip->zstate[0];
2790 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2791 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2792 zbc_close_zone(devip, zsp);
2798 static void zbc_open_zone(struct sdebug_dev_info *devip,
2799 struct sdeb_zone_state *zsp, bool explicit)
2801 enum sdebug_z_cond zc;
2803 if (!zbc_zone_is_seq(zsp))
2807 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2808 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2811 /* Close an implicit open zone if necessary */
2812 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2813 zbc_close_zone(devip, zsp);
2814 else if (devip->max_open &&
2815 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2816 zbc_close_imp_open_zone(devip);
2818 if (zsp->z_cond == ZC4_CLOSED)
2821 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2822 devip->nr_exp_open++;
2824 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2825 devip->nr_imp_open++;
2829 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2830 unsigned long long lba, unsigned int num)
2832 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2833 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2835 if (!zbc_zone_is_seq(zsp))
2838 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2840 if (zsp->z_wp >= zend)
2841 zsp->z_cond = ZC5_FULL;
2846 if (lba != zsp->z_wp)
2847 zsp->z_non_seq_resource = true;
2853 } else if (end > zsp->z_wp) {
2859 if (zsp->z_wp >= zend)
2860 zsp->z_cond = ZC5_FULL;
2866 zend = zsp->z_start + zsp->z_size;
2871 static int check_zbc_access_params(struct scsi_cmnd *scp,
2872 unsigned long long lba, unsigned int num, bool write)
2874 struct scsi_device *sdp = scp->device;
2875 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2876 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2877 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2880 if (devip->zmodel == BLK_ZONED_HA)
2882 /* For host-managed, reads cannot cross zone type boundaries */
2883 if (zsp->z_type != zsp_end->z_type) {
2884 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2887 return check_condition_result;
2892 /* Writing into a gap zone is not allowed */
2893 if (zbc_zone_is_gap(zsp)) {
2894 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2895 ATTEMPT_ACCESS_GAP);
2896 return check_condition_result;
2899 /* No restrictions for writes within conventional zones */
2900 if (zbc_zone_is_conv(zsp)) {
2901 if (!zbc_zone_is_conv(zsp_end)) {
2902 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2904 WRITE_BOUNDARY_ASCQ);
2905 return check_condition_result;
2910 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2911 /* Writes cannot cross sequential zone boundaries */
2912 if (zsp_end != zsp) {
2913 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2915 WRITE_BOUNDARY_ASCQ);
2916 return check_condition_result;
2918 /* Cannot write full zones */
2919 if (zsp->z_cond == ZC5_FULL) {
2920 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2921 INVALID_FIELD_IN_CDB, 0);
2922 return check_condition_result;
2924 /* Writes must be aligned to the zone WP */
2925 if (lba != zsp->z_wp) {
2926 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2928 UNALIGNED_WRITE_ASCQ);
2929 return check_condition_result;
2933 /* Handle implicit open of closed and empty zones */
2934 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2935 if (devip->max_open &&
2936 devip->nr_exp_open >= devip->max_open) {
2937 mk_sense_buffer(scp, DATA_PROTECT,
2940 return check_condition_result;
2942 zbc_open_zone(devip, zsp, false);
2948 static inline int check_device_access_params
2949 (struct scsi_cmnd *scp, unsigned long long lba,
2950 unsigned int num, bool write)
2952 struct scsi_device *sdp = scp->device;
2953 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2955 if (lba + num > sdebug_capacity) {
2956 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2957 return check_condition_result;
2959 /* transfer length excessive (tie in to block limits VPD page) */
2960 if (num > sdebug_store_sectors) {
2961 /* needs work to find which cdb byte 'num' comes from */
2962 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2963 return check_condition_result;
2965 if (write && unlikely(sdebug_wp)) {
2966 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2967 return check_condition_result;
2969 if (sdebug_dev_is_zoned(devip))
2970 return check_zbc_access_params(scp, lba, num, write);
2976 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2977 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2978 * that access any of the "stores" in struct sdeb_store_info should call this
2979 * function with bug_if_fake_rw set to true.
2981 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2982 bool bug_if_fake_rw)
2984 if (sdebug_fake_rw) {
2985 BUG_ON(bug_if_fake_rw); /* See note above */
2988 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2991 /* Returns number of bytes copied or -1 if error. */
2992 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2993 u32 sg_skip, u64 lba, u32 num, bool do_write)
2996 u64 block, rest = 0;
2997 enum dma_data_direction dir;
2998 struct scsi_data_buffer *sdb = &scp->sdb;
3002 dir = DMA_TO_DEVICE;
3003 write_since_sync = true;
3005 dir = DMA_FROM_DEVICE;
3008 if (!sdb->length || !sip)
3010 if (scp->sc_data_direction != dir)
3014 block = do_div(lba, sdebug_store_sectors);
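/*
 * do_div() leaves the quotient in lba and returns the remainder, so
 * block is the sector offset within the store; LBAs beyond the store
 * size wrap around (virtual_gb may exceed the backing store).
 */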
3015 if (block + num > sdebug_store_sectors)
3016 rest = block + num - sdebug_store_sectors;
3018 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3019 fsp + (block * sdebug_sector_size),
3020 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3021 if (ret != (num - rest) * sdebug_sector_size)
3025 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3026 fsp, rest * sdebug_sector_size,
3027 sg_skip + ((num - rest) * sdebug_sector_size),
3034 /* Returns number of bytes copied or -1 if error. */
3035 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3037 struct scsi_data_buffer *sdb = &scp->sdb;
3041 if (scp->sc_data_direction != DMA_TO_DEVICE)
3043 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3044 num * sdebug_sector_size, 0, true);
3047 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3048 * arr into sip->storep+lba and return true. If comparison fails then
* return false. */
3050 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3051 const u8 *arr, bool compare_only)
3054 u64 block, rest = 0;
3055 u32 store_blks = sdebug_store_sectors;
3056 u32 lb_size = sdebug_sector_size;
3057 u8 *fsp = sip->storep;
3059 block = do_div(lba, store_blks);
3060 if (block + num > store_blks)
3061 rest = block + num - store_blks;
3063 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3067 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3073 arr += num * lb_size;
3074 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3076 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3080 static __be16 dif_compute_csum(const void *buf, int len)
3085 csum = (__force __be16)ip_compute_csum(buf, len);
3087 csum = cpu_to_be16(crc_t10dif(buf, len));
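/*
 * The guard tag above is either an IP checksum or the T10-DIF CRC16,
 * selected by the guard module parameter (0 -> CRC16, 1 -> IP checksum).
 */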
3092 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3093 sector_t sector, u32 ei_lba)
3095 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3097 if (sdt->guard_tag != csum) {
3098 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3099 (unsigned long)sector,
3100 be16_to_cpu(sdt->guard_tag),
3104 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3105 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3106 pr_err("REF check failed on sector %lu\n",
3107 (unsigned long)sector);
3110 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3111 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3112 pr_err("REF check failed on sector %lu\n",
3113 (unsigned long)sector);
3119 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3120 unsigned int sectors, bool read)
3124 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3125 scp->device->hostdata, true);
3126 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3127 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3128 struct sg_mapping_iter miter;
3130 /* Bytes of protection data to copy into sgl */
3131 resid = sectors * sizeof(*dif_storep);
3133 sg_miter_start(&miter, scsi_prot_sglist(scp),
3134 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3135 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3137 while (sg_miter_next(&miter) && resid > 0) {
3138 size_t len = min_t(size_t, miter.length, resid);
3139 void *start = dif_store(sip, sector);
3142 if (dif_store_end < start + len)
3143 rest = start + len - dif_store_end;
3148 memcpy(paddr, start, len - rest);
3150 memcpy(start, paddr, len - rest);
3154 memcpy(paddr + len - rest, dif_storep, rest);
3156 memcpy(dif_storep, paddr + len - rest, rest);
3159 sector += len / sizeof(*dif_storep);
3162 sg_miter_stop(&miter);
3165 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3166 unsigned int sectors, u32 ei_lba)
3171 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3172 scp->device->hostdata, true);
3173 struct t10_pi_tuple *sdt;
3175 for (i = 0; i < sectors; i++, ei_lba++) {
3176 sector = start_sec + i;
3177 sdt = dif_store(sip, sector);
3179 if (sdt->app_tag == cpu_to_be16(0xffff))
3183 * Because scsi_debug acts as both initiator and
3184 * target we proceed to verify the PI even if
3185 * RDPROTECT=3. This is done so the "initiator" knows
3186 * which type of error to return. Otherwise we would
3187 * have to iterate over the PI twice.
3189 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3190 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3199 dif_copy_prot(scp, start_sec, sectors, true);
3206 sdeb_read_lock(struct sdeb_store_info *sip)
3208 if (sdebug_no_rwlock) {
3210 __acquire(&sip->macc_lck);
3212 __acquire(&sdeb_fake_rw_lck);
3215 read_lock(&sip->macc_lck);
3217 read_lock(&sdeb_fake_rw_lck);
3222 sdeb_read_unlock(struct sdeb_store_info *sip)
3224 if (sdebug_no_rwlock) {
3226 __release(&sip->macc_lck);
3228 __release(&sdeb_fake_rw_lck);
3231 read_unlock(&sip->macc_lck);
3233 read_unlock(&sdeb_fake_rw_lck);
3238 sdeb_write_lock(struct sdeb_store_info *sip)
3240 if (sdebug_no_rwlock) {
3242 __acquire(&sip->macc_lck);
3244 __acquire(&sdeb_fake_rw_lck);
3247 write_lock(&sip->macc_lck);
3249 write_lock(&sdeb_fake_rw_lck);
3254 sdeb_write_unlock(struct sdeb_store_info *sip)
3256 if (sdebug_no_rwlock) {
3258 __release(&sip->macc_lck);
3260 __release(&sdeb_fake_rw_lck);
3263 write_unlock(&sip->macc_lck);
3265 write_unlock(&sdeb_fake_rw_lck);
3269 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3276 struct sdeb_store_info *sip = devip2sip(devip, true);
3277 u8 *cmd = scp->cmnd;
3282 lba = get_unaligned_be64(cmd + 2);
3283 num = get_unaligned_be32(cmd + 10);
3288 lba = get_unaligned_be32(cmd + 2);
3289 num = get_unaligned_be16(cmd + 7);
3294 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3295 (u32)(cmd[1] & 0x1f) << 16;
3296 num = (0 == cmd[4]) ? 256 : cmd[4];
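/* READ(6) above: 21 bit LBA spans bytes 1..3; cmd[4] == 0 means 256 blocks */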
3301 lba = get_unaligned_be32(cmd + 2);
3302 num = get_unaligned_be32(cmd + 6);
3305 case XDWRITEREAD_10:
3307 lba = get_unaligned_be32(cmd + 2);
3308 num = get_unaligned_be16(cmd + 7);
3311 default: /* assume READ(32) */
3312 lba = get_unaligned_be64(cmd + 12);
3313 ei_lba = get_unaligned_be32(cmd + 20);
3314 num = get_unaligned_be32(cmd + 28);
3318 if (unlikely(have_dif_prot && check_prot)) {
3319 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3321 mk_sense_invalid_opcode(scp);
3322 return check_condition_result;
3324 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3325 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3326 (cmd[1] & 0xe0) == 0)
3327 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3330 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3331 atomic_read(&sdeb_inject_pending))) {
3333 atomic_set(&sdeb_inject_pending, 0);
3336 ret = check_device_access_params(scp, lba, num, false);
3339 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3340 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3341 ((lba + num) > sdebug_medium_error_start))) {
3342 /* claim unrecoverable read error */
3343 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3344 /* set info field and valid bit for fixed descriptor */
3345 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3346 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3347 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3348 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3349 put_unaligned_be32(ret, scp->sense_buffer + 3);
3351 scsi_set_resid(scp, scsi_bufflen(scp));
3352 return check_condition_result;
3355 sdeb_read_lock(sip);
3358 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3359 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3360 case 1: /* Guard tag error */
3361 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3362 sdeb_read_unlock(sip);
3363 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3364 return check_condition_result;
3365 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3366 sdeb_read_unlock(sip);
3367 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3368 return illegal_condition_result;
3371 case 3: /* Reference tag error */
3372 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3373 sdeb_read_unlock(sip);
3374 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3375 return check_condition_result;
3376 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3377 sdeb_read_unlock(sip);
3378 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3379 return illegal_condition_result;
3385 ret = do_device_access(sip, scp, 0, lba, num, false);
3386 sdeb_read_unlock(sip);
3387 if (unlikely(ret == -1))
3388 return DID_ERROR << 16;
3390 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3392 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3393 atomic_read(&sdeb_inject_pending))) {
3394 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3395 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3396 atomic_set(&sdeb_inject_pending, 0);
3397 return check_condition_result;
3398 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3399 /* Logical block guard check failed */
3400 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3401 atomic_set(&sdeb_inject_pending, 0);
3402 return illegal_condition_result;
3403 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3404 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3405 atomic_set(&sdeb_inject_pending, 0);
3406 return illegal_condition_result;
3412 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3413 unsigned int sectors, u32 ei_lba)
3416 struct t10_pi_tuple *sdt;
3418 sector_t sector = start_sec;
3421 struct sg_mapping_iter diter;
3422 struct sg_mapping_iter piter;
3424 BUG_ON(scsi_sg_count(SCpnt) == 0);
3425 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3427 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3428 scsi_prot_sg_count(SCpnt),
3429 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3430 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3431 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3433 /* For each protection page */
3434 while (sg_miter_next(&piter)) {
3436 if (WARN_ON(!sg_miter_next(&diter))) {
3441 for (ppage_offset = 0; ppage_offset < piter.length;
3442 ppage_offset += sizeof(struct t10_pi_tuple)) {
3443 /* If we're at the end of the current
3444 * data page advance to the next one
3446 if (dpage_offset >= diter.length) {
3447 if (WARN_ON(!sg_miter_next(&diter))) {
3454 sdt = piter.addr + ppage_offset;
3455 daddr = diter.addr + dpage_offset;
3457 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3458 ret = dif_verify(sdt, daddr, sector, ei_lba);
3465 dpage_offset += sdebug_sector_size;
3467 diter.consumed = dpage_offset;
3468 sg_miter_stop(&diter);
3470 sg_miter_stop(&piter);
3472 dif_copy_prot(SCpnt, start_sec, sectors, false);
3479 sg_miter_stop(&diter);
3480 sg_miter_stop(&piter);
3484 static unsigned long lba_to_map_index(sector_t lba)
3486 if (sdebug_unmap_alignment)
3487 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3488 sector_div(lba, sdebug_unmap_granularity);
3492 static sector_t map_index_to_lba(unsigned long index)
3494 sector_t lba = index * sdebug_unmap_granularity;
3496 if (sdebug_unmap_alignment)
3497 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
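/*
 * Worked example: with unmap granularity 8 and alignment 4, map index 0
 * covers LBAs 0..3 and map_index_to_lba(1) = 1 * 8 - (8 - 4) = 4.
 */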
3501 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3505 unsigned int mapped;
3506 unsigned long index;
3509 index = lba_to_map_index(lba);
3510 mapped = test_bit(index, sip->map_storep);
3513 next = find_next_zero_bit(sip->map_storep, map_size, index);
3515 next = find_next_bit(sip->map_storep, map_size, index);
3517 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3522 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3525 sector_t end = lba + len;
3528 unsigned long index = lba_to_map_index(lba);
3530 if (index < map_size)
3531 set_bit(index, sip->map_storep);
3533 lba = map_index_to_lba(index + 1);
3537 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3540 sector_t end = lba + len;
3541 u8 *fsp = sip->storep;
3544 unsigned long index = lba_to_map_index(lba);
3546 if (lba == map_index_to_lba(index) &&
3547 lba + sdebug_unmap_granularity <= end &&
3549 clear_bit(index, sip->map_storep);
3550 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3551 memset(fsp + lba * sdebug_sector_size,
3552 (sdebug_lbprz & 1) ? 0 : 0xff,
3553 sdebug_sector_size *
3554 sdebug_unmap_granularity);
3556 if (sip->dif_storep) {
3557 memset(sip->dif_storep + lba, 0xff,
3558 sizeof(*sip->dif_storep) *
3559 sdebug_unmap_granularity);
3562 lba = map_index_to_lba(index + 1);
3566 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3573 struct sdeb_store_info *sip = devip2sip(devip, true);
3574 u8 *cmd = scp->cmnd;
3579 lba = get_unaligned_be64(cmd + 2);
3580 num = get_unaligned_be32(cmd + 10);
3585 lba = get_unaligned_be32(cmd + 2);
3586 num = get_unaligned_be16(cmd + 7);
3591 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3592 (u32)(cmd[1] & 0x1f) << 16;
3593 num = (0 == cmd[4]) ? 256 : cmd[4];
3598 lba = get_unaligned_be32(cmd + 2);
3599 num = get_unaligned_be32(cmd + 6);
3602 case 0x53: /* XDWRITEREAD(10) */
3604 lba = get_unaligned_be32(cmd + 2);
3605 num = get_unaligned_be16(cmd + 7);
3608 default: /* assume WRITE(32) */
3609 lba = get_unaligned_be64(cmd + 12);
3610 ei_lba = get_unaligned_be32(cmd + 20);
3611 num = get_unaligned_be32(cmd + 28);
3615 if (unlikely(have_dif_prot && check_prot)) {
3616 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3618 mk_sense_invalid_opcode(scp);
3619 return check_condition_result;
3621 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3622 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3623 (cmd[1] & 0xe0) == 0)
3624 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3628 sdeb_write_lock(sip);
3629 ret = check_device_access_params(scp, lba, num, true);
3631 sdeb_write_unlock(sip);
3636 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3637 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3638 case 1: /* Guard tag error */
3639 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3640 sdeb_write_unlock(sip);
3641 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3642 return illegal_condition_result;
3643 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3644 sdeb_write_unlock(sip);
3645 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3646 return check_condition_result;
3649 case 3: /* Reference tag error */
3650 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3651 sdeb_write_unlock(sip);
3652 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3653 return illegal_condition_result;
3654 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3655 sdeb_write_unlock(sip);
3656 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3657 return check_condition_result;
3663 ret = do_device_access(sip, scp, 0, lba, num, true);
3664 if (unlikely(scsi_debug_lbp()))
3665 map_region(sip, lba, num);
3666 /* If ZBC zone then bump its write pointer */
3667 if (sdebug_dev_is_zoned(devip))
3668 zbc_inc_wp(devip, lba, num);
3669 sdeb_write_unlock(sip);
3670 if (unlikely(-1 == ret))
3671 return DID_ERROR << 16;
3672 else if (unlikely(sdebug_verbose &&
3673 (ret < (num * sdebug_sector_size))))
3674 sdev_printk(KERN_INFO, scp->device,
3675 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3676 my_name, num * sdebug_sector_size, ret);
3678 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3679 atomic_read(&sdeb_inject_pending))) {
3680 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3681 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3682 atomic_set(&sdeb_inject_pending, 0);
3683 return check_condition_result;
3684 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3685 /* Logical block guard check failed */
3686 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3687 atomic_set(&sdeb_inject_pending, 0);
3688 return illegal_condition_result;
3689 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3690 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3691 atomic_set(&sdeb_inject_pending, 0);
3692 return illegal_condition_result;
3699 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3700 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3702 static int resp_write_scat(struct scsi_cmnd *scp,
3703 struct sdebug_dev_info *devip)
3705 u8 *cmd = scp->cmnd;
3708 struct sdeb_store_info *sip = devip2sip(devip, true);
3710 u16 lbdof, num_lrd, k;
3711 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3712 u32 lb_size = sdebug_sector_size;
3717 static const u32 lrd_size = 32; /* + parameter list header size */
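/*
 * Each LBA range descriptor is 32 bytes and the parameter list header
 * is also 32 bytes, so descriptor k starts at byte (k + 1) * 32 of the
 * data-out buffer (see the loop over lrdp below).
 */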
3719 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3721 wrprotect = (cmd[10] >> 5) & 0x7;
3722 lbdof = get_unaligned_be16(cmd + 12);
3723 num_lrd = get_unaligned_be16(cmd + 16);
3724 bt_len = get_unaligned_be32(cmd + 28);
3725 } else { /* that leaves WRITE SCATTERED(16) */
3727 wrprotect = (cmd[2] >> 5) & 0x7;
3728 lbdof = get_unaligned_be16(cmd + 4);
3729 num_lrd = get_unaligned_be16(cmd + 8);
3730 bt_len = get_unaligned_be32(cmd + 10);
3731 if (unlikely(have_dif_prot)) {
3732 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3734 mk_sense_invalid_opcode(scp);
3735 return illegal_condition_result;
3737 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3738 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3740 sdev_printk(KERN_ERR, scp->device,
3741 "Unprotected WR to DIF device\n");
3744 if ((num_lrd == 0) || (bt_len == 0))
3745 return 0; /* T10 says these do-nothings are not errors */
3748 sdev_printk(KERN_INFO, scp->device,
3749 "%s: %s: LB Data Offset field bad\n",
3751 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3752 return illegal_condition_result;
3754 lbdof_blen = lbdof * lb_size;
3755 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3757 sdev_printk(KERN_INFO, scp->device,
3758 "%s: %s: LBA range descriptors don't fit\n",
3760 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3761 return illegal_condition_result;
3763 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3765 return SCSI_MLQUEUE_HOST_BUSY;
3767 sdev_printk(KERN_INFO, scp->device,
3768 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3769 my_name, __func__, lbdof_blen);
3770 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3772 ret = DID_ERROR << 16;
3776 sdeb_write_lock(sip);
3777 sg_off = lbdof_blen;
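/*
 * The LB data offset is counted in logical blocks, so the write data
 * starts lbdof_blen bytes into the data-out buffer, just after the
 * header and LBA range descriptors fetched above.
 */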
3778 /* Spec says the Buffer Transfer Length field counts logical blocks in dout */
3780 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3781 lba = get_unaligned_be64(up + 0);
3782 num = get_unaligned_be32(up + 8);
3784 sdev_printk(KERN_INFO, scp->device,
3785 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3786 my_name, __func__, k, lba, num, sg_off);
3789 ret = check_device_access_params(scp, lba, num, true);
3791 goto err_out_unlock;
3792 num_by = num * lb_size;
3793 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3795 if ((cum_lb + num) > bt_len) {
3797 sdev_printk(KERN_INFO, scp->device,
3798 "%s: %s: sum of blocks > data provided\n",
3800 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3802 ret = illegal_condition_result;
3803 goto err_out_unlock;
3807 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3808 int prot_ret = prot_verify_write(scp, lba, num,
3812 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3814 ret = illegal_condition_result;
3815 goto err_out_unlock;
3819 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3820 /* If ZBC zone then bump its write pointer */
3821 if (sdebug_dev_is_zoned(devip))
3822 zbc_inc_wp(devip, lba, num);
3823 if (unlikely(scsi_debug_lbp()))
3824 map_region(sip, lba, num);
3825 if (unlikely(-1 == ret)) {
3826 ret = DID_ERROR << 16;
3827 goto err_out_unlock;
3828 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3829 sdev_printk(KERN_INFO, scp->device,
3830 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3831 my_name, num_by, ret);
3833 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3834 atomic_read(&sdeb_inject_pending))) {
3835 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3836 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3837 atomic_set(&sdeb_inject_pending, 0);
3838 ret = check_condition_result;
3839 goto err_out_unlock;
3840 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3841 /* Logical block guard check failed */
3842 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3843 atomic_set(&sdeb_inject_pending, 0);
3844 ret = illegal_condition_result;
3845 goto err_out_unlock;
3846 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3847 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3848 atomic_set(&sdeb_inject_pending, 0);
3849 ret = illegal_condition_result;
3850 goto err_out_unlock;
3858 sdeb_write_unlock(sip);
3864 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3865 u32 ei_lba, bool unmap, bool ndob)
3867 struct scsi_device *sdp = scp->device;
3868 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3869 unsigned long long i;
3871 u32 lb_size = sdebug_sector_size;
3873 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3874 scp->device->hostdata, true);
3878 sdeb_write_lock(sip);
3880 ret = check_device_access_params(scp, lba, num, true);
3882 sdeb_write_unlock(sip);
3886 if (unmap && scsi_debug_lbp()) {
3887 unmap_region(sip, lba, num);
3891 block = do_div(lbaa, sdebug_store_sectors);
3892 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3894 fs1p = fsp + (block * lb_size);
3896 memset(fs1p, 0, lb_size);
3899 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3902 sdeb_write_unlock(sip);
3903 return DID_ERROR << 16;
3904 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3905 sdev_printk(KERN_INFO, scp->device,
3906 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3907 my_name, "write same", lb_size, ret);
3909 /* Copy first sector to remaining blocks */
3910 for (i = 1 ; i < num ; i++) {
3912 block = do_div(lbaa, sdebug_store_sectors);
3913 memmove(fsp + (block * lb_size), fs1p, lb_size);
3915 if (scsi_debug_lbp())
3916 map_region(sip, lba, num);
3917 /* If ZBC zone then bump its write pointer */
3918 if (sdebug_dev_is_zoned(devip))
3919 zbc_inc_wp(devip, lba, num);
3921 sdeb_write_unlock(sip);
3926 static int resp_write_same_10(struct scsi_cmnd *scp,
3927 struct sdebug_dev_info *devip)
3929 u8 *cmd = scp->cmnd;
3936 if (sdebug_lbpws10 == 0) {
3937 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3938 return check_condition_result;
3942 lba = get_unaligned_be32(cmd + 2);
3943 num = get_unaligned_be16(cmd + 7);
3944 if (num > sdebug_write_same_length) {
3945 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3946 return check_condition_result;
3948 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3951 static int resp_write_same_16(struct scsi_cmnd *scp,
3952 struct sdebug_dev_info *devip)
3954 u8 *cmd = scp->cmnd;
3961 if (cmd[1] & 0x8) { /* UNMAP */
3962 if (sdebug_lbpws == 0) {
3963 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3964 return check_condition_result;
3968 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3970 lba = get_unaligned_be64(cmd + 2);
3971 num = get_unaligned_be32(cmd + 10);
3972 if (num > sdebug_write_same_length) {
3973 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3974 return check_condition_result;
3976 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3979 /* Note the mode field is in the same position as the (lower) service action
3980 * field. For the Report supported operation codes command, SPC-4 suggests
3981 * each mode of this command should be reported separately; left for the future. */
3982 static int resp_write_buffer(struct scsi_cmnd *scp,
3983 struct sdebug_dev_info *devip)
3985 u8 *cmd = scp->cmnd;
3986 struct scsi_device *sdp = scp->device;
3987 struct sdebug_dev_info *dp;
3990 mode = cmd[1] & 0x1f;
3992 case 0x4: /* download microcode (MC) and activate (ACT) */
3993 /* set UAs on this device only */
3994 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3995 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3997 case 0x5: /* download MC, save and ACT */
3998 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4000 case 0x6: /* download MC with offsets and ACT */
4001 /* set UAs on most devices (LUs) in this target */
4002 list_for_each_entry(dp,
4003 &devip->sdbg_host->dev_info_list,
4005 if (dp->target == sdp->id) {
4006 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4008 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4012 case 0x7: /* download MC with offsets, save, and ACT */
4013 /* set UA on all devices (LUs) in this target */
4014 list_for_each_entry(dp,
4015 &devip->sdbg_host->dev_info_list,
4017 if (dp->target == sdp->id)
4018 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4022 /* do nothing for this command for other mode values */
4028 static int resp_comp_write(struct scsi_cmnd *scp,
4029 struct sdebug_dev_info *devip)
4031 u8 *cmd = scp->cmnd;
4033 struct sdeb_store_info *sip = devip2sip(devip, true);
4036 u32 lb_size = sdebug_sector_size;
4041 lba = get_unaligned_be64(cmd + 2);
4042 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4044 return 0; /* degenerate case, not an error */
4045 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4047 mk_sense_invalid_opcode(scp);
4048 return check_condition_result;
4050 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4051 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4052 (cmd[1] & 0xe0) == 0)
4053 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4055 ret = check_device_access_params(scp, lba, num, false);
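/*
 * COMPARE AND WRITE carries the compare data followed by the write data
 * in a single data-out transfer, so the buffer below holds dnum (twice
 * num) logical blocks.
 */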
4059 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4061 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4063 return check_condition_result;
4066 sdeb_write_lock(sip);
4068 ret = do_dout_fetch(scp, dnum, arr);
4070 retval = DID_ERROR << 16;
4072 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4073 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4074 "indicated=%u, IO sent=%d bytes\n", my_name,
4075 dnum * lb_size, ret);
4076 if (!comp_write_worker(sip, lba, num, arr, false)) {
4077 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4078 retval = check_condition_result;
4081 if (scsi_debug_lbp())
4082 map_region(sip, lba, num);
4084 sdeb_write_unlock(sip);
4089 struct unmap_block_desc {
4095 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4098 struct unmap_block_desc *desc;
4099 struct sdeb_store_info *sip = devip2sip(devip, true);
4100 unsigned int i, payload_len, descriptors;
4103 if (!scsi_debug_lbp())
4104 return 0; /* fib and say it's done */
4105 payload_len = get_unaligned_be16(scp->cmnd + 7);
4106 BUG_ON(scsi_bufflen(scp) != payload_len);
4108 descriptors = (payload_len - 8) / 16;
4109 if (descriptors > sdebug_unmap_max_desc) {
4110 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4111 return check_condition_result;
4114 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4116 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4118 return check_condition_result;
4121 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4123 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4124 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
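/*
 * UNMAP parameter list layout, as checked above: a 2-byte data length
 * (payload_len - 2), a 2-byte block descriptor data length, 4 reserved
 * bytes, then 16-byte block descriptors, each holding an 8-byte LBA,
 * a 4-byte block count and 4 reserved bytes.
 */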
4126 desc = (void *)&buf[8];
4128 sdeb_write_lock(sip);
4130 for (i = 0 ; i < descriptors ; i++) {
4131 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4132 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4134 ret = check_device_access_params(scp, lba, num, true);
4138 unmap_region(sip, lba, num);
4144 sdeb_write_unlock(sip);
4150 #define SDEBUG_GET_LBA_STATUS_LEN 32
4152 static int resp_get_lba_status(struct scsi_cmnd *scp,
4153 struct sdebug_dev_info *devip)
4155 u8 *cmd = scp->cmnd;
4157 u32 alloc_len, mapped, num;
4159 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4161 lba = get_unaligned_be64(cmd + 2);
4162 alloc_len = get_unaligned_be32(cmd + 10);
4167 ret = check_device_access_params(scp, lba, 1, false);
4171 if (scsi_debug_lbp()) {
4172 struct sdeb_store_info *sip = devip2sip(devip, true);
4174 mapped = map_state(sip, lba, &num);
4177 /* following just in case virtual_gb changed */
4178 sdebug_capacity = get_sdebug_capacity();
4179 if (sdebug_capacity - lba <= 0xffffffff)
4180 num = sdebug_capacity - lba;
4185 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4186 put_unaligned_be32(20, arr); /* Parameter Data Length */
4187 put_unaligned_be64(lba, arr + 8); /* LBA */
4188 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4189 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4191 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4194 static int resp_sync_cache(struct scsi_cmnd *scp,
4195 struct sdebug_dev_info *devip)
4200 u8 *cmd = scp->cmnd;
4202 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4203 lba = get_unaligned_be32(cmd + 2);
4204 num_blocks = get_unaligned_be16(cmd + 7);
4205 } else { /* SYNCHRONIZE_CACHE(16) */
4206 lba = get_unaligned_be64(cmd + 2);
4207 num_blocks = get_unaligned_be32(cmd + 10);
4209 if (lba + num_blocks > sdebug_capacity) {
4210 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4211 return check_condition_result;
4213 if (!write_since_sync || (cmd[1] & 0x2))
4214 res = SDEG_RES_IMMED_MASK;
4215 else /* delay if write_since_sync and IMMED clear */
4216 write_since_sync = false;
4221 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4222 * CONDITION MET if the specified blocks will fit (or already reside) in the cache, and
4223 * a GOOD status otherwise. Model a disk with a big cache and yield
4224 * CONDITION MET. The emulation also tries to bring the addressed range into the
4225 * cache associated with the CPU(s).
4227 static int resp_pre_fetch(struct scsi_cmnd *scp,
4228 struct sdebug_dev_info *devip)
4232 u64 block, rest = 0;
4234 u8 *cmd = scp->cmnd;
4235 struct sdeb_store_info *sip = devip2sip(devip, true);
4236 u8 *fsp = sip->storep;
4238 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4239 lba = get_unaligned_be32(cmd + 2);
4240 nblks = get_unaligned_be16(cmd + 7);
4241 } else { /* PRE-FETCH(16) */
4242 lba = get_unaligned_be64(cmd + 2);
4243 nblks = get_unaligned_be32(cmd + 10);
4245 if (lba + nblks > sdebug_capacity) {
4246 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4247 return check_condition_result;
4251 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4252 block = do_div(lba, sdebug_store_sectors);
4253 if (block + nblks > sdebug_store_sectors)
4254 rest = block + nblks - sdebug_store_sectors;
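/* The virtual LBA range may exceed the backing store (e.g. when virtual_gb
 * is set), in which case the tail of the range wraps to the store's start.
 */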
4256 /* Try to bring the PRE-FETCH range into the CPU's cache */
4257 sdeb_read_lock(sip);
4258 prefetch_range(fsp + (sdebug_sector_size * block),
4259 (nblks - rest) * sdebug_sector_size);
4261 prefetch_range(fsp, rest * sdebug_sector_size);
4262 sdeb_read_unlock(sip);
4265 res = SDEG_RES_IMMED_MASK;
4266 return res | condition_met_result;
4269 #define RL_BUCKET_ELEMS 8
4271 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4272 * (W-LUN), the normal Linux scanning logic does not associate it with a
4273 * device (e.g. /dev/sg7). The following magic will make that association:
4274 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4275 * where <n> is a host number. If there are multiple targets in a host then
4276 * the above will associate a W-LUN to each target. To only get a W-LUN
4277 * for target 2, use "echo '- 2 49409' > scan".
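* (49409 decimal is 0xc101, i.e. the well known LU address SCSI_W_LUN_REPORT_LUNS.)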
4279 static int resp_report_luns(struct scsi_cmnd *scp,
4280 struct sdebug_dev_info *devip)
4282 unsigned char *cmd = scp->cmnd;
4283 unsigned int alloc_len;
4284 unsigned char select_report;
4286 struct scsi_lun *lun_p;
4287 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4288 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4289 unsigned int wlun_cnt; /* report luns W-LUN count */
4290 unsigned int tlun_cnt; /* total LUN count */
4291 unsigned int rlen; /* response length (in bytes) */
4293 unsigned int off_rsp = 0;
4294 const int sz_lun = sizeof(struct scsi_lun);
4296 clear_luns_changed_on_target(devip);
4298 select_report = cmd[2];
4299 alloc_len = get_unaligned_be32(cmd + 6);
4301 if (alloc_len < 4) {
4302 pr_err("alloc len too small %d\n", alloc_len);
4303 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4304 return check_condition_result;
4307 switch (select_report) {
4308 case 0: /* all LUNs apart from W-LUNs */
4309 lun_cnt = sdebug_max_luns;
4312 case 1: /* only W-LUNs */
4316 case 2: /* all LUNs */
4317 lun_cnt = sdebug_max_luns;
4320 case 0x10: /* only administrative LUs */
4321 case 0x11: /* see SPC-5 */
4322 case 0x12: /* only subsidiary LUs owned by referenced LU */
4324 pr_debug("select report invalid %d\n", select_report);
4325 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4326 return check_condition_result;
4329 if (sdebug_no_lun_0 && (lun_cnt > 0))
4332 tlun_cnt = lun_cnt + wlun_cnt;
4333 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4334 scsi_set_resid(scp, scsi_bufflen(scp));
4335 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4336 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4338 /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
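/* A REPORT LUNS response is a 4-byte LUN list length plus 4 reserved bytes,
 * followed by one 8-byte LUN entry per logical unit.
 */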
4339 lun = sdebug_no_lun_0 ? 1 : 0;
4340 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4341 memset(arr, 0, sizeof(arr));
4342 lun_p = (struct scsi_lun *)&arr[0];
4344 put_unaligned_be32(rlen, &arr[0]);
4348 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4349 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4351 int_to_scsilun(lun++, lun_p);
4352 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4353 lun_p->scsi_lun[0] |= 0x40;
4355 if (j < RL_BUCKET_ELEMS)
4358 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4364 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4368 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4372 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4374 bool is_bytchk3 = false;
4377 u32 vnum, a_num, off;
4378 const u32 lb_size = sdebug_sector_size;
4381 u8 *cmd = scp->cmnd;
4382 struct sdeb_store_info *sip = devip2sip(devip, true);
4384 bytchk = (cmd[1] >> 1) & 0x3;
4386 return 0; /* always claim internal verify okay */
4387 } else if (bytchk == 2) {
4388 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4389 return check_condition_result;
4390 } else if (bytchk == 3) {
4391 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4395 lba = get_unaligned_be64(cmd + 2);
4396 vnum = get_unaligned_be32(cmd + 10);
4398 case VERIFY: /* is VERIFY(10) */
4399 lba = get_unaligned_be32(cmd + 2);
4400 vnum = get_unaligned_be16(cmd + 7);
4403 mk_sense_invalid_opcode(scp);
4404 return check_condition_result;
4407 return 0; /* not an error */
4408 a_num = is_bytchk3 ? 1 : vnum;
4409 /* Treat following check like one for read (i.e. no write) access */
4410 ret = check_device_access_params(scp, lba, a_num, false);
4414 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4416 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4418 return check_condition_result;
4420 /* Not changing store, so only need read access */
4421 sdeb_read_lock(sip);
4423 ret = do_dout_fetch(scp, a_num, arr);
4425 ret = DID_ERROR << 16;
4427 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4428 sdev_printk(KERN_INFO, scp->device,
4429 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4430 my_name, __func__, a_num * lb_size, ret);
4433 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4434 memcpy(arr + off, arr, lb_size);
4437 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4438 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4439 ret = check_condition_result;
4443 sdeb_read_unlock(sip);
4448 #define RZONES_DESC_HD 64
4450 /* Report zones depending on start LBA and reporting options */
4451 static int resp_report_zones(struct scsi_cmnd *scp,
4452 struct sdebug_dev_info *devip)
4454 unsigned int rep_max_zones, nrz = 0;
4456 u32 alloc_len, rep_opts, rep_len;
4459 u8 *arr = NULL, *desc;
4460 u8 *cmd = scp->cmnd;
4461 struct sdeb_zone_state *zsp = NULL;
4462 struct sdeb_store_info *sip = devip2sip(devip, false);
4464 if (!sdebug_dev_is_zoned(devip)) {
4465 mk_sense_invalid_opcode(scp);
4466 return check_condition_result;
4468 zs_lba = get_unaligned_be64(cmd + 2);
4469 alloc_len = get_unaligned_be32(cmd + 10);
4471 return 0; /* not an error */
4472 rep_opts = cmd[14] & 0x3f;
4473 partial = cmd[14] & 0x80;
4475 if (zs_lba >= sdebug_capacity) {
4476 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4477 return check_condition_result;
4480 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4482 arr = kzalloc(alloc_len, GFP_ATOMIC);
4484 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4486 return check_condition_result;
4489 sdeb_read_lock(sip);
4492 for (lba = zs_lba; lba < sdebug_capacity;
4493 lba = zsp->z_start + zsp->z_size) {
4494 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4496 zsp = zbc_zone(devip, lba);
4503 if (zsp->z_cond != ZC1_EMPTY)
4507 /* Implicit open zones */
4508 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4512 /* Explicit open zones */
4513 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4518 if (zsp->z_cond != ZC4_CLOSED)
4523 if (zsp->z_cond != ZC5_FULL)
4530 * Read-only, offline, reset WP recommended are
4531 * not emulated: no zones to report;
4535 /* non-seq-resource set */
4536 if (!zsp->z_non_seq_resource)
4540 /* All zones except gap zones. */
4541 if (zbc_zone_is_gap(zsp))
4545 /* Not write pointer (conventional) zones */
4546 if (zbc_zone_is_seq(zsp))
4550 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4551 INVALID_FIELD_IN_CDB, 0);
4552 ret = check_condition_result;
4556 if (nrz < rep_max_zones) {
4557 /* Fill zone descriptor */
4558 desc[0] = zsp->z_type;
4559 desc[1] = zsp->z_cond << 4;
4560 if (zsp->z_non_seq_resource)
4562 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4563 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4564 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4568 if (partial && nrz >= rep_max_zones)
4575 /* Zone list length. */
4576 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4578 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4579 /* Zone starting LBA granularity. */
4580 if (devip->zcap < devip->zsize)
4581 put_unaligned_be64(devip->zsize, arr + 16);
4583 rep_len = (unsigned long)desc - (unsigned long)arr;
4584 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4587 sdeb_read_unlock(sip);
4592 /* Logic transplanted from tcmu-runner, file_zbc.c */
4593 static void zbc_open_all(struct sdebug_dev_info *devip)
4595 struct sdeb_zone_state *zsp = &devip->zstate[0];
4598 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4599 if (zsp->z_cond == ZC4_CLOSED)
4600 zbc_open_zone(devip, &devip->zstate[i], true);
4604 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4608 enum sdebug_z_cond zc;
4609 u8 *cmd = scp->cmnd;
4610 struct sdeb_zone_state *zsp;
4611 bool all = cmd[14] & 0x01;
4612 struct sdeb_store_info *sip = devip2sip(devip, false);
4614 if (!sdebug_dev_is_zoned(devip)) {
4615 mk_sense_invalid_opcode(scp);
4616 return check_condition_result;
4619 sdeb_write_lock(sip);
4622 /* Check if all closed zones can be opened */
4623 if (devip->max_open &&
4624 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4625 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4627 res = check_condition_result;
4630 /* Open all closed zones */
4631 zbc_open_all(devip);
4635 /* Open the specified zone */
4636 z_id = get_unaligned_be64(cmd + 2);
4637 if (z_id >= sdebug_capacity) {
4638 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4639 res = check_condition_result;
4643 zsp = zbc_zone(devip, z_id);
4644 if (z_id != zsp->z_start) {
4645 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4646 res = check_condition_result;
4649 if (zbc_zone_is_conv(zsp)) {
4650 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4651 res = check_condition_result;
4656 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4659 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4660 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4662 res = check_condition_result;
4666 zbc_open_zone(devip, zsp, true);
4668 sdeb_write_unlock(sip);
4672 static void zbc_close_all(struct sdebug_dev_info *devip)
4676 for (i = 0; i < devip->nr_zones; i++)
4677 zbc_close_zone(devip, &devip->zstate[i]);
4680 static int resp_close_zone(struct scsi_cmnd *scp,
4681 struct sdebug_dev_info *devip)
4685 u8 *cmd = scp->cmnd;
4686 struct sdeb_zone_state *zsp;
4687 bool all = cmd[14] & 0x01;
4688 struct sdeb_store_info *sip = devip2sip(devip, false);
4690 if (!sdebug_dev_is_zoned(devip)) {
4691 mk_sense_invalid_opcode(scp);
4692 return check_condition_result;
4695 sdeb_write_lock(sip);
4698 zbc_close_all(devip);
4702 /* Close specified zone */
4703 z_id = get_unaligned_be64(cmd + 2);
4704 if (z_id >= sdebug_capacity) {
4705 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4706 res = check_condition_result;
4710 zsp = zbc_zone(devip, z_id);
4711 if (z_id != zsp->z_start) {
4712 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4713 res = check_condition_result;
4716 if (zbc_zone_is_conv(zsp)) {
4717 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4718 res = check_condition_result;
4722 zbc_close_zone(devip, zsp);
4724 sdeb_write_unlock(sip);
4728 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4729 struct sdeb_zone_state *zsp, bool empty)
4731 enum sdebug_z_cond zc = zsp->z_cond;
4733 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4734 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4735 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4736 zbc_close_zone(devip, zsp);
4737 if (zsp->z_cond == ZC4_CLOSED)
4739 zsp->z_wp = zsp->z_start + zsp->z_size;
4740 zsp->z_cond = ZC5_FULL;
4744 static void zbc_finish_all(struct sdebug_dev_info *devip)
4748 for (i = 0; i < devip->nr_zones; i++)
4749 zbc_finish_zone(devip, &devip->zstate[i], false);
4752 static int resp_finish_zone(struct scsi_cmnd *scp,
4753 struct sdebug_dev_info *devip)
4755 struct sdeb_zone_state *zsp;
4758 u8 *cmd = scp->cmnd;
4759 bool all = cmd[14] & 0x01;
4760 struct sdeb_store_info *sip = devip2sip(devip, false);
4762 if (!sdebug_dev_is_zoned(devip)) {
4763 mk_sense_invalid_opcode(scp);
4764 return check_condition_result;
4767 sdeb_write_lock(sip);
4770 zbc_finish_all(devip);
4774 /* Finish the specified zone */
4775 z_id = get_unaligned_be64(cmd + 2);
4776 if (z_id >= sdebug_capacity) {
4777 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4778 res = check_condition_result;
4782 zsp = zbc_zone(devip, z_id);
4783 if (z_id != zsp->z_start) {
4784 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4785 res = check_condition_result;
4788 if (zbc_zone_is_conv(zsp)) {
4789 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4790 res = check_condition_result;
4794 zbc_finish_zone(devip, zsp, true);
4796 sdeb_write_unlock(sip);
4800 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4801 struct sdeb_zone_state *zsp)
4803 enum sdebug_z_cond zc;
4804 struct sdeb_store_info *sip = devip2sip(devip, false);
4806 if (!zbc_zone_is_seq(zsp))
4810 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4811 zbc_close_zone(devip, zsp);
4813 if (zsp->z_cond == ZC4_CLOSED)
4816 if (zsp->z_wp > zsp->z_start)
4817 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4818 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4820 zsp->z_non_seq_resource = false;
4821 zsp->z_wp = zsp->z_start;
4822 zsp->z_cond = ZC1_EMPTY;
4825 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4829 for (i = 0; i < devip->nr_zones; i++)
4830 zbc_rwp_zone(devip, &devip->zstate[i]);
4833 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4835 struct sdeb_zone_state *zsp;
4838 u8 *cmd = scp->cmnd;
4839 bool all = cmd[14] & 0x01;
4840 struct sdeb_store_info *sip = devip2sip(devip, false);
4842 if (!sdebug_dev_is_zoned(devip)) {
4843 mk_sense_invalid_opcode(scp);
4844 return check_condition_result;
4847 sdeb_write_lock(sip);
4854 z_id = get_unaligned_be64(cmd + 2);
4855 if (z_id >= sdebug_capacity) {
4856 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4857 res = check_condition_result;
4861 zsp = zbc_zone(devip, z_id);
4862 if (z_id != zsp->z_start) {
4863 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4864 res = check_condition_result;
4867 if (zbc_zone_is_conv(zsp)) {
4868 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4869 res = check_condition_result;
4873 zbc_rwp_zone(devip, zsp);
4875 sdeb_write_unlock(sip);
4879 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4882 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
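/* blk_mq_unique_tag() packs the hw queue index into the tag's upper 16 bits */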
4884 hwq = blk_mq_unique_tag_to_hwq(tag);
4886 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4887 if (WARN_ON_ONCE(hwq >= submit_queues))
4890 return sdebug_q_arr + hwq;
4893 static u32 get_tag(struct scsi_cmnd *cmnd)
4895 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4898 /* Queued (deferred) command completions converge here. */
4899 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4901 bool aborted = sd_dp->aborted;
4904 unsigned long iflags;
4905 struct sdebug_queue *sqp;
4906 struct sdebug_queued_cmd *sqcp;
4907 struct scsi_cmnd *scp;
4908 struct sdebug_dev_info *devip;
4910 if (unlikely(aborted))
4911 sd_dp->aborted = false;
4912 qc_idx = sd_dp->qc_idx;
4913 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4914 if (sdebug_statistics) {
4915 atomic_inc(&sdebug_completions);
4916 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4917 atomic_inc(&sdebug_miss_cpus);
4919 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4920 pr_err("wild qc_idx=%d\n", qc_idx);
4923 spin_lock_irqsave(&sqp->qc_lock, iflags);
4924 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4925 sqcp = &sqp->qc_arr[qc_idx];
4927 if (unlikely(scp == NULL)) {
4928 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4929 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4930 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4933 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4935 atomic_dec(&devip->num_in_q);
4937 pr_err("devip=NULL\n");
4938 if (unlikely(atomic_read(&retired_max_queue) > 0))
4941 sqcp->a_cmnd = NULL;
4942 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4943 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4944 pr_err("Unexpected completion\n");
4948 if (unlikely(retiring)) { /* user has reduced max_queue */
4951 retval = atomic_read(&retired_max_queue);
4952 if (qc_idx >= retval) {
4953 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4954 pr_err("index %d too large\n", retval);
4957 k = find_last_bit(sqp->in_use_bm, retval);
4958 if ((k < sdebug_max_queue) || (k == retval))
4959 atomic_set(&retired_max_queue, 0);
4961 atomic_set(&retired_max_queue, k + 1);
4963 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4964 if (unlikely(aborted)) {
4966 pr_info("bypassing scsi_done() due to aborted cmd\n");
4969 scsi_done(scp); /* callback to mid level */
4972 /* When the high resolution timer fires, this function is called. */
4973 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4975 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4977 sdebug_q_cmd_complete(sd_dp);
4978 return HRTIMER_NORESTART;
4981 /* When the work queue runs the deferred work, it calls this function. */
4982 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4984 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4986 sdebug_q_cmd_complete(sd_dp);
4989 static bool got_shared_uuid;
4990 static uuid_t shared_uuid;
4992 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4994 struct sdeb_zone_state *zsp;
4995 sector_t capacity = get_sdebug_capacity();
4996 sector_t conv_capacity;
4997 sector_t zstart = 0;
5001 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5002 * a zone size allowing for at least 4 zones on the device. Otherwise,
5003 * use the specified zone size checking that at least 2 zones can be
5004 * created for the device.
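* For example (hypothetical numbers), with 512-byte logical blocks a 128 MiB
* zone size yields 262144 sectors per zone.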
5006 if (!sdeb_zbc_zone_size_mb) {
5007 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5008 >> ilog2(sdebug_sector_size);
5009 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5011 if (devip->zsize < 2) {
5012 pr_err("Device capacity too small\n");
5016 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5017 pr_err("Zone size is not a power of 2\n");
5020 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5021 >> ilog2(sdebug_sector_size);
5022 if (devip->zsize >= capacity) {
5023 pr_err("Zone size too large for device capacity\n");
5028 devip->zsize_shift = ilog2(devip->zsize);
5029 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5031 if (sdeb_zbc_zone_cap_mb == 0) {
5032 devip->zcap = devip->zsize;
5034 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5035 ilog2(sdebug_sector_size);
5036 if (devip->zcap > devip->zsize) {
5037 pr_err("Zone capacity too large\n");
5042 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5043 if (conv_capacity >= capacity) {
5044 pr_err("Number of conventional zones too large\n");
5047 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5048 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5050 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5052 /* Add gap zones if zone capacity is smaller than the zone size */
5053 if (devip->zcap < devip->zsize)
5054 devip->nr_zones += devip->nr_seq_zones;
5056 if (devip->zmodel == BLK_ZONED_HM) {
5057 /* zbc_max_open_zones can be 0, meaning "not reported" */
5058 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5059 devip->max_open = (devip->nr_zones - 1) / 2;
5061 devip->max_open = sdeb_zbc_max_open;
5064 devip->zstate = kcalloc(devip->nr_zones,
5065 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5069 for (i = 0; i < devip->nr_zones; i++) {
5070 zsp = &devip->zstate[i];
5072 zsp->z_start = zstart;
5074 if (i < devip->nr_conv_zones) {
5075 zsp->z_type = ZBC_ZTYPE_CNV;
5076 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5077 zsp->z_wp = (sector_t)-1;
5079 min_t(u64, devip->zsize, capacity - zstart);
5080 } else if ((zstart & (devip->zsize - 1)) == 0) {
5081 if (devip->zmodel == BLK_ZONED_HM)
5082 zsp->z_type = ZBC_ZTYPE_SWR;
5084 zsp->z_type = ZBC_ZTYPE_SWP;
5085 zsp->z_cond = ZC1_EMPTY;
5086 zsp->z_wp = zsp->z_start;
5088 min_t(u64, devip->zcap, capacity - zstart);
5090 zsp->z_type = ZBC_ZTYPE_GAP;
5091 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5092 zsp->z_wp = (sector_t)-1;
5093 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5097 WARN_ON_ONCE((int)zsp->z_size <= 0);
5098 zstart += zsp->z_size;
5104 static struct sdebug_dev_info *sdebug_device_create(
5105 struct sdebug_host_info *sdbg_host, gfp_t flags)
5107 struct sdebug_dev_info *devip;
5109 devip = kzalloc(sizeof(*devip), flags);
5111 if (sdebug_uuid_ctl == 1)
5112 uuid_gen(&devip->lu_name);
5113 else if (sdebug_uuid_ctl == 2) {
5114 if (got_shared_uuid)
5115 devip->lu_name = shared_uuid;
5117 uuid_gen(&shared_uuid);
5118 got_shared_uuid = true;
5119 devip->lu_name = shared_uuid;
5122 devip->sdbg_host = sdbg_host;
5123 if (sdeb_zbc_in_use) {
5124 devip->zmodel = sdeb_zbc_model;
5125 if (sdebug_device_create_zones(devip)) {
5130 devip->zmodel = BLK_ZONED_NONE;
5132 devip->sdbg_host = sdbg_host;
5133 devip->create_ts = ktime_get_boottime();
5134 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5135 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5140 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5142 struct sdebug_host_info *sdbg_host;
5143 struct sdebug_dev_info *open_devip = NULL;
5144 struct sdebug_dev_info *devip;
5146 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5148 pr_err("Host info NULL\n");
5152 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5153 if ((devip->used) && (devip->channel == sdev->channel) &&
5154 (devip->target == sdev->id) &&
5155 (devip->lun == sdev->lun))
5158 if ((!devip->used) && (!open_devip))
5162 if (!open_devip) { /* try and make a new one */
5163 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5165 pr_err("out of memory at line %d\n", __LINE__);
5170 open_devip->channel = sdev->channel;
5171 open_devip->target = sdev->id;
5172 open_devip->lun = sdev->lun;
5173 open_devip->sdbg_host = sdbg_host;
5174 atomic_set(&open_devip->num_in_q, 0);
5175 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5176 open_devip->used = true;
5180 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5183 pr_info("slave_alloc <%u %u %u %llu>\n",
5184 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5188 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5190 struct sdebug_dev_info *devip =
5191 (struct sdebug_dev_info *)sdp->hostdata;
5194 pr_info("slave_configure <%u %u %u %llu>\n",
5195 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5196 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5197 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5198 if (devip == NULL) {
5199 devip = find_build_dev_info(sdp);
5201 return 1; /* no resources, will be marked offline */
5203 sdp->hostdata = devip;
5205 sdp->no_uld_attach = 1;
5206 config_cdb_len(sdp);
5210 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5212 struct sdebug_dev_info *devip =
5213 (struct sdebug_dev_info *)sdp->hostdata;
5216 pr_info("slave_destroy <%u %u %u %llu>\n",
5217 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5219 /* make this slot available for re-use */
5220 devip->used = false;
5221 sdp->hostdata = NULL;
5225 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5226 enum sdeb_defer_type defer_t)
5230 if (defer_t == SDEB_DEFER_HRT)
5231 hrtimer_cancel(&sd_dp->hrt);
5232 else if (defer_t == SDEB_DEFER_WQ)
5233 cancel_work_sync(&sd_dp->ew.work);
5236 /* If @cmnd found, deletes its timer or work queue and returns true; else returns false */
5238 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5240 unsigned long iflags;
5241 int j, k, qmax, r_qmax;
5242 enum sdeb_defer_type l_defer_t;
5243 struct sdebug_queue *sqp;
5244 struct sdebug_queued_cmd *sqcp;
5245 struct sdebug_dev_info *devip;
5246 struct sdebug_defer *sd_dp;
5248 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5249 spin_lock_irqsave(&sqp->qc_lock, iflags);
5250 qmax = sdebug_max_queue;
5251 r_qmax = atomic_read(&retired_max_queue);
5254 for (k = 0; k < qmax; ++k) {
5255 if (test_bit(k, sqp->in_use_bm)) {
5256 sqcp = &sqp->qc_arr[k];
5257 if (cmnd != sqcp->a_cmnd)
5260 devip = (struct sdebug_dev_info *)
5261 cmnd->device->hostdata;
5263 atomic_dec(&devip->num_in_q);
5264 sqcp->a_cmnd = NULL;
5265 sd_dp = sqcp->sd_dp;
5267 l_defer_t = READ_ONCE(sd_dp->defer_t);
5268 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5270 l_defer_t = SDEB_DEFER_NONE;
5271 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5272 stop_qc_helper(sd_dp, l_defer_t);
5273 clear_bit(k, sqp->in_use_bm);
5277 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5282 /* Deletes (stops) timers or work queues of all queued commands */
5283 static void stop_all_queued(void)
5285 unsigned long iflags;
5287 enum sdeb_defer_type l_defer_t;
5288 struct sdebug_queue *sqp;
5289 struct sdebug_queued_cmd *sqcp;
5290 struct sdebug_dev_info *devip;
5291 struct sdebug_defer *sd_dp;
5293 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5294 spin_lock_irqsave(&sqp->qc_lock, iflags);
5295 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5296 if (test_bit(k, sqp->in_use_bm)) {
5297 sqcp = &sqp->qc_arr[k];
5298 if (sqcp->a_cmnd == NULL)
5300 devip = (struct sdebug_dev_info *)
5301 sqcp->a_cmnd->device->hostdata;
5303 atomic_dec(&devip->num_in_q);
5304 sqcp->a_cmnd = NULL;
5305 sd_dp = sqcp->sd_dp;
5307 l_defer_t = READ_ONCE(sd_dp->defer_t);
5308 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5310 l_defer_t = SDEB_DEFER_NONE;
5311 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5312 stop_qc_helper(sd_dp, l_defer_t);
5313 clear_bit(k, sqp->in_use_bm);
5314 spin_lock_irqsave(&sqp->qc_lock, iflags);
5317 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5321 /* Free queued command memory on heap */
5322 static void free_all_queued(void)
5325 struct sdebug_queue *sqp;
5326 struct sdebug_queued_cmd *sqcp;
5328 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5329 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5330 sqcp = &sqp->qc_arr[k];
5337 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5343 ok = stop_queued_cmnd(SCpnt);
5344 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5345 sdev_printk(KERN_INFO, SCpnt->device,
5346 "%s: command%s found\n", __func__,
5352 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5355 if (SCpnt && SCpnt->device) {
5356 struct scsi_device *sdp = SCpnt->device;
5357 struct sdebug_dev_info *devip =
5358 (struct sdebug_dev_info *)sdp->hostdata;
5360 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5361 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5363 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5368 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5370 struct sdebug_host_info *sdbg_host;
5371 struct sdebug_dev_info *devip;
5372 struct scsi_device *sdp;
5373 struct Scsi_Host *hp;
5376 ++num_target_resets;
5379 sdp = SCpnt->device;
5382 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5383 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5387 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5389 list_for_each_entry(devip,
5390 &sdbg_host->dev_info_list,
5392 if (devip->target == sdp->id) {
5393 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5397 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5398 sdev_printk(KERN_INFO, sdp,
5399 "%s: %d device(s) found in target\n", __func__, k);
5404 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5406 struct sdebug_host_info *sdbg_host;
5407 struct sdebug_dev_info *devip;
5408 struct scsi_device *sdp;
5409 struct Scsi_Host *hp;
5413 if (!(SCpnt && SCpnt->device))
5415 sdp = SCpnt->device;
5416 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5417 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5420 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5422 list_for_each_entry(devip,
5423 &sdbg_host->dev_info_list,
5425 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5430 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5431 sdev_printk(KERN_INFO, sdp,
5432 "%s: %d device(s) found in host\n", __func__, k);
5437 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5439 struct sdebug_host_info *sdbg_host;
5440 struct sdebug_dev_info *devip;
5444 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5445 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5446 spin_lock(&sdebug_host_list_lock);
5447 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5448 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5450 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5454 spin_unlock(&sdebug_host_list_lock);
5456 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5457 sdev_printk(KERN_INFO, SCpnt->device,
5458 "%s: %d device(s) found\n", __func__, k);
5462 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5464 struct msdos_partition *pp;
5465 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5466 int sectors_per_part, num_sectors, k;
5467 int heads_by_sects, start_sec, end_sec;
5469 /* assume partition table already zeroed */
5470 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5472 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5473 sdebug_num_parts = SDEBUG_MAX_PARTS;
5474 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5476 num_sectors = (int)get_sdebug_capacity();
5477 sectors_per_part = (num_sectors - sdebug_sectors_per)
5479 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5480 starts[0] = sdebug_sectors_per;
5481 max_part_secs = sectors_per_part;
5482 for (k = 1; k < sdebug_num_parts; ++k) {
5483 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5485 if (starts[k] - starts[k - 1] < max_part_secs)
5486 max_part_secs = starts[k] - starts[k - 1];
5488 starts[sdebug_num_parts] = num_sectors;
5489 starts[sdebug_num_parts + 1] = 0;
5491 ramp[510] = 0x55; /* magic partition markings */
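/* 0x55 0xAA at offsets 510/511 form the classic MBR boot signature; the
 * partition table itself starts at offset 0x1be.
 */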
5493 pp = (struct msdos_partition *)(ramp + 0x1be);
5494 for (k = 0; starts[k + 1]; ++k, ++pp) {
5495 start_sec = starts[k];
5496 end_sec = starts[k] + max_part_secs - 1;
5499 pp->cyl = start_sec / heads_by_sects;
5500 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5501 / sdebug_sectors_per;
5502 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5504 pp->end_cyl = end_sec / heads_by_sects;
5505 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5506 / sdebug_sectors_per;
5507 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5509 pp->start_sect = cpu_to_le32(start_sec);
5510 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5511 pp->sys_ind = 0x83; /* plain Linux partition */
5515 static void block_unblock_all_queues(bool block)
5518 struct sdebug_queue *sqp;
5520 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5521 atomic_set(&sqp->blocked, (int)block);
5524 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5525 * commands will be processed normally before triggers occur.
5527 static void tweak_cmnd_count(void)
5531 modulo = abs(sdebug_every_nth);
5534 block_unblock_all_queues(true);
5535 count = atomic_read(&sdebug_cmnd_count);
5536 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5537 block_unblock_all_queues(false);
5540 static void clear_queue_stats(void)
5542 atomic_set(&sdebug_cmnd_count, 0);
5543 atomic_set(&sdebug_completions, 0);
5544 atomic_set(&sdebug_miss_cpus, 0);
5545 atomic_set(&sdebug_a_tsf, 0);
5548 static bool inject_on_this_cmd(void)
5550 if (sdebug_every_nth == 0)
5552 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5555 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5557 /* Complete the processing of the thread that queued a SCSI command to this
5558 * driver. It either completes the command by calling scsi_done() or
5559 * schedules a hrtimer or work queue callback and then returns 0. Returns
5560 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5562 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5564 int (*pfp)(struct scsi_cmnd *,
5565 struct sdebug_dev_info *),
5566 int delta_jiff, int ndelay)
5569 bool inject = false;
5570 bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5571 int k, num_in_q, qdepth;
5572 unsigned long iflags;
5573 u64 ns_from_boot = 0;
5574 struct sdebug_queue *sqp;
5575 struct sdebug_queued_cmd *sqcp;
5576 struct scsi_device *sdp;
5577 struct sdebug_defer *sd_dp;
5579 if (unlikely(devip == NULL)) {
5580 if (scsi_result == 0)
5581 scsi_result = DID_NO_CONNECT << 16;
5582 goto respond_in_thread;
5586 if (delta_jiff == 0)
5587 goto respond_in_thread;
5589 sqp = get_queue(cmnd);
5590 spin_lock_irqsave(&sqp->qc_lock, iflags);
5591 if (unlikely(atomic_read(&sqp->blocked))) {
5592 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5593 return SCSI_MLQUEUE_HOST_BUSY;
5595 num_in_q = atomic_read(&devip->num_in_q);
5596 qdepth = cmnd->device->queue_depth;
5597 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5599 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5600 goto respond_in_thread;
5602 scsi_result = device_qfull_result;
5603 } else if (unlikely(sdebug_every_nth &&
5604 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5605 (scsi_result == 0))) {
5606 if ((num_in_q == (qdepth - 1)) &&
5607 (atomic_inc_return(&sdebug_a_tsf) >=
5608 abs(sdebug_every_nth))) {
5609 atomic_set(&sdebug_a_tsf, 0);
5611 scsi_result = device_qfull_result;
5615 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5616 if (unlikely(k >= sdebug_max_queue)) {
5617 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5619 goto respond_in_thread;
5620 scsi_result = device_qfull_result;
5621 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5622 sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5623 __func__, sdebug_max_queue);
5624 goto respond_in_thread;
5626 set_bit(k, sqp->in_use_bm);
5627 atomic_inc(&devip->num_in_q);
5628 sqcp = &sqp->qc_arr[k];
5629 sqcp->a_cmnd = cmnd;
5630 cmnd->host_scribble = (unsigned char *)sqcp;
5631 sd_dp = sqcp->sd_dp;
5632 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5635 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5637 atomic_dec(&devip->num_in_q);
5638 clear_bit(k, sqp->in_use_bm);
5639 return SCSI_MLQUEUE_HOST_BUSY;
5646 /* Set the hostwide tag */
5647 if (sdebug_host_max_queue)
5648 sd_dp->hc_idx = get_tag(cmnd);
5651 ns_from_boot = ktime_get_boottime_ns();
5653 /* one of the resp_*() response functions is called here */
5654 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5655 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5656 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5657 delta_jiff = ndelay = 0;
5659 if (cmnd->result == 0 && scsi_result != 0)
5660 cmnd->result = scsi_result;
5661 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5662 if (atomic_read(&sdeb_inject_pending)) {
5663 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5664 atomic_set(&sdeb_inject_pending, 0);
5665 cmnd->result = check_condition_result;
5669 if (unlikely(sdebug_verbose && cmnd->result))
5670 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5671 __func__, cmnd->result);
5673 if (delta_jiff > 0 || ndelay > 0) {
5676 if (delta_jiff > 0) {
5677 u64 ns = jiffies_to_nsecs(delta_jiff);
5679 if (sdebug_random && ns < U32_MAX) {
5680 ns = prandom_u32_max((u32)ns);
5681 } else if (sdebug_random) {
5682 ns >>= 12; /* scale to 4 usec precision */
5683 if (ns < U32_MAX) /* over 4 hours max */
5684 ns = prandom_u32_max((u32)ns);
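/* with 4096 ns units, U32_MAX units is 2^44 ns, roughly 4.9 hours */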
5687 kt = ns_to_ktime(ns);
5688 } else { /* ndelay has a 4.2 second max */
5689 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5691 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5692 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5694 if (kt <= d) { /* elapsed duration >= kt */
5695 spin_lock_irqsave(&sqp->qc_lock, iflags);
5696 sqcp->a_cmnd = NULL;
5697 atomic_dec(&devip->num_in_q);
5698 clear_bit(k, sqp->in_use_bm);
5699 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5702 /* call scsi_done() from this thread */
5706 /* otherwise reduce kt by elapsed time */
5711 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5712 spin_lock_irqsave(&sqp->qc_lock, iflags);
5713 if (!sd_dp->init_poll) {
5714 sd_dp->init_poll = true;
5715 sqcp->sd_dp = sd_dp;
5716 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5719 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5720 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5722 if (!sd_dp->init_hrt) {
5723 sd_dp->init_hrt = true;
5724 sqcp->sd_dp = sd_dp;
5725 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5726 HRTIMER_MODE_REL_PINNED);
5727 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5728 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5731 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5732 /* schedule the invocation of scsi_done() for a later time */
5733 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5735 if (sdebug_statistics)
5736 sd_dp->issuing_cpu = raw_smp_processor_id();
5737 } else { /* jdelay < 0, use work queue */
5738 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5739 atomic_read(&sdeb_inject_pending)))
5740 sd_dp->aborted = true;
5742 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5743 spin_lock_irqsave(&sqp->qc_lock, iflags);
5744 if (!sd_dp->init_poll) {
5745 sd_dp->init_poll = true;
5746 sqcp->sd_dp = sd_dp;
5747 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5750 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5751 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5753 if (!sd_dp->init_wq) {
5754 sd_dp->init_wq = true;
5755 sqcp->sd_dp = sd_dp;
5756 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5758 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5760 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5761 schedule_work(&sd_dp->ew.work);
5763 if (sdebug_statistics)
5764 sd_dp->issuing_cpu = raw_smp_processor_id();
5765 if (unlikely(sd_dp->aborted)) {
5766 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5767 scsi_cmd_to_rq(cmnd)->tag);
5768 blk_abort_request(scsi_cmd_to_rq(cmnd));
5769 atomic_set(&sdeb_inject_pending, 0);
5770 sd_dp->aborted = false;
5773 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5774 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5775 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5778 respond_in_thread: /* call back to mid-layer using invocation thread */
5779 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5780 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5781 if (cmnd->result == 0 && scsi_result != 0)
5782 cmnd->result = scsi_result;
5787 /* Note: The following macros create attribute files in the
5788 /sys/module/scsi_debug/parameters directory. Unfortunately this
5789 driver is unaware of changes made via those files and so cannot trigger
5790 auxiliary actions as it can when the corresponding attribute in the
5791 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5793 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5794 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5795 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5796 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5797 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5798 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5799 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5800 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5801 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5802 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5803 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5804 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5805 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5806 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5807 module_param_string(inq_product, sdebug_inq_product_id,
5808 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5809 module_param_string(inq_rev, sdebug_inq_product_rev,
5810 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5811 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5812 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5813 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5814 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5815 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5816 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5817 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5818 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5819 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5820 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5821 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5823 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5825 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5826 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5827 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5828 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5829 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5830 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5831 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5832 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5833 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5834 module_param_named(per_host_store, sdebug_per_host_store, bool,
5836 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5837 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5838 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5839 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5840 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5841 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5842 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5843 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5844 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5845 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5846 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5847 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5848 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5849 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5850 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5851 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5852 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5853 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5855 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5856 module_param_named(write_same_length, sdebug_write_same_length, int,
5858 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5859 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5860 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5861 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5862 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5864 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5865 MODULE_DESCRIPTION("SCSI debug adapter driver");
5866 MODULE_LICENSE("GPL");
5867 MODULE_VERSION(SDEBUG_VERSION);
5869 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5870 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5871 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5872 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5873 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5874 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5875 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5876 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5877 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5878 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5879 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5880 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5881 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5882 MODULE_PARM_DESC(host_max_queue,
5883 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5884 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5885 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5886 SDEBUG_VERSION "\")");
5887 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5888 MODULE_PARM_DESC(lbprz,
5889 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5890 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5891 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5892 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5893 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5894 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5895 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5896 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5897 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5898 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5899 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5900 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5901 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5902 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5903 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5904 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5905 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5906 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5907 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5908 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5909 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5910 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5911 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5912 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5913 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5914 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5915 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5916 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5917 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5918 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5919 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5920 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5921 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5922 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
5923 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5924 MODULE_PARM_DESC(uuid_ctl,
5925 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5926 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5927 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5928 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5929 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5930 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5931 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5932 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5933 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5934 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
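/*
 * Example (hypothetical invocation) combining several of the parameters
 * described above: one host with 2 targets, 4 LUNs per target, backed by
 * a shared 256 MiB ramdisk that accepts the UNMAP command:
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 lbpu=1
 */
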
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];

static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))
		return sdebug_info;
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}

/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
				 int length)
{
	char arr[16];
	int opts;
	int minLen = length > 15 ? 15 : length;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	memcpy(arr, buffer, minLen);
	arr[minLen] = '\0';
	if (1 != sscanf(arr, "%d", &opts))
		return -EINVAL;
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	if (sdebug_every_nth != 0)
		tweak_cmnd_count();
	return length;
}

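/*
 * Example (hypothetical host number 0): switch on noisy logging, which is
 * opts value 1 (SDEBUG_OPT_NOISE):
 *   echo 1 > /proc/scsi/scsi_debug/0
 * The sysfs 'opts' driver attribute further below accepts the same values.
 */
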
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? " not_in_use" : ""));
			++j;
		}
	}
	return 0;
}

static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);

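/*
 * Example (hypothetical value): defer every response by about 2 jiffies:
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * while 'echo 0' makes responses immediate (no deferral).
 */
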
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
						       : DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);

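/*
 * Example (hypothetical value): a 50 microsecond response delay; per the
 * comment above, a non-zero ndelay overrides the jiffies-based 'delay':
 *   echo 50000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 */
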
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);

static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int n;

	/* Cannot change from or to TYPE_ZBC with sysfs */
	if (sdebug_ptype == TYPE_ZBC)
		return -EINVAL;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n == TYPE_ZBC)
			return -EINVAL;
		sdebug_ptype = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ptype);

static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_dsense = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(dsense);

static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);

static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);

static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);

static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);

static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);

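/*
 * Example (hypothetical values): make every 100th command misbehave;
 * opts=4 (SDEBUG_OPT_TIMEOUT) causes the selected commands to be ignored
 * so that the mid level sees them time out:
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */
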
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
	}
	return count;
}
static DRIVER_ATTR_RW(lun_format);

static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);

static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);

static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
}

static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_no_rwlock = v;
	return count;
}
static DRIVER_ATTR_RW(no_rwlock);

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);

static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);

static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);

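/*
 * Example (hypothetical value): report an 8 GiB capacity while keeping the
 * real (smaller) ramdisk; a CAPACITY CHANGED unit attention is then raised
 * on each device:
 *   echo 8 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 */
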
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);

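/*
 * Example (hypothetical counts): add two hosts, then remove one:
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * With per_host_store=1 (and fake_rw=0) each added host gets its own
 * store, re-using a not-in-use store when one is available.
 */
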
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);

static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);

static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);

static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);

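/*
 * Example: with LBP active (e.g. lbpu=1), reading this attribute shows the
 * currently mapped (provisioned) LBA ranges in bitmap list form, for
 * instance "0-1,32-47" (hypothetical output).
 */
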
static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);

static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);

static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);

static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);

static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};

/* Returns ZBC model (BLK_ZONED_*) or a negative errno */
static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

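/*
 * Example (hypothetical invocation): dump the current value of every
 * driver attribute registered above:
 *   grep . /sys/bus/pseudo/drivers/scsi_debug/*
 */
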
static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;
	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		} else {
			sdebug_lun_am = SAM_LUN_AM_FLAT;
		}
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0, %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;	/* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}

static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

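/*
 * Lifecycle sketch (hypothetical shell session): module load runs
 * scsi_debug_init(), which registers the pseudo bus and driver and builds
 * 'add_host' hosts; module unload runs scsi_debug_exit(), which tears the
 * same objects down in reverse order:
 *   modprobe scsi_debug add_host=2 per_host_store=1
 *   modprobe -r scsi_debug
 */
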
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}

static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth > SDEBUG_CANQUEUE) {
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
		qdepth = SDEBUG_CANQUEUE;
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}

static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true;	/* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true;	/* time out reads and writes */
	}
	return false;
}

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}

static int sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return 0;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}

	return 0;
}

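/*
 * Example mapping (hypothetical values): with submit_queues=4 and
 * poll_queues=1, HCTX_TYPE_DEFAULT gets 3 queues at offset 0 and
 * HCTX_TYPE_POLL gets 1 queue at offset 3; HCTX_TYPE_READ gets none.
 */
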
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
	ktime_t kt_from_boot = ktime_get_boottime();
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;

	spin_lock_irqsave(&sqp->qc_lock, iflags);

	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
	if (qc_idx >= sdebug_max_queue)
		goto unlock;

	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
		if (first) {
			first = false;
			if (!test_bit(qc_idx, sqp->in_use_bm))
				continue;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
		}
		if (qc_idx >= sdebug_max_queue)
			break;

		sqcp = &sqp->qc_arr[qc_idx];
		sd_dp = sqcp->sd_dp;
		if (unlikely(!sd_dp))
			continue;
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
			break;
		}
		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
			if (kt_from_boot < sd_dp->cmpl_ts)
				continue;

		} else		/* ignoring non REQ_POLLED requests */
			continue;
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);
		else
			pr_err("devip=NULL from %s\n", __func__);
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = true;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
			       sqp, queue_num, qc_idx, __func__);
			break;
		}
		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				break;
			}
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scsi_done(scp); /* callback to mid level */
		num_entries++;
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
			break;
	}

unlock:
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}

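/*
 * Example (hypothetical invocation): the mq_poll path above is exercised
 * by polled I/O submitters, e.g.:
 *   fio --ioengine=io_uring --hipri --filename=/dev/sdX ...
 * where /dev/sdX is a scsi_debug LUN and the module was loaded with
 * poll_queues > 0.
 */
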
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler =	scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};

static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		return -ENODEV;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	} else {
		/*
		 * Poll queues don't need interrupts, but we need at least one I/O queue
		 * left over for non-polled I/O.
		 * If condition not met, trim poll_queues to 1 (just for simplicity).
		 */
		if (poll_queues >= submit_queues) {
			if (submit_queues < 3)
				pr_warn("%s: trim poll_queues to 1\n", my_name);
			else
				pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
					my_name, submit_queues - 1);
			poll_queues = 1;
		}
		if (poll_queues)
			hpnt->nr_maps = 3;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};