// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0	/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY	(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};

static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};

/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
	    {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
	    {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
	    maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
			    0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
	     0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;		/* iouring iopoll interface. */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}

static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}

static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
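/*
 * Sketch of the standard (non-EVPD) INQUIRY response built above, for
 * quick reference when decoding a trace: byte 0 PQ/PDT, byte 1 RMB,
 * byte 2 version, bytes 8-15 vendor id, 16-31 product id, 32-35 product
 * revision, 36-43 vendor specific (driver date string), and version
 * descriptors from byte 58 onward.
 */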
/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};

static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	int alloc_len = cmd[4];
	int len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;	/* NOT_READY in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
}
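/*
 * The two layouts above follow the descriptor vs fixed sense split:
 * descriptor format puts response code 0x72 in byte 0 with sense key,
 * ASC and ASCQ in bytes 1..3 (8 bytes total here), while fixed format
 * 0x70 keeps the sense key in byte 2, additional length 0xa in byte 7
 * and ASC/ASCQ in bytes 12 and 13 of an 18 byte buffer.
 */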
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
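/*
 * Example of the tur_ms_to_ready window handled above: with
 * sdeb_tur_ms_to_ready=2000 a LU created at boot time T reports itself
 * stopped (stopped_state==2) until diff_ns >= 2000 * 10^6, at which
 * point devip->stopped is cleared and normal START STOP UNIT handling
 * resumes.
 */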
static sector_t get_sdebug_capacity(void)
{
	static const unsigned int gibibyte = 1073741824;

	if (sdebug_virtual_gb > 0)
		return (sector_t)sdebug_virtual_gb *
			(gibibyte / sdebug_sector_size);
	else
		return sdebug_store_sectors;
}
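/*
 * Worked example: virtual_gb=4 with the default 512 byte sectors gives
 * 4 * (1073741824 / 512) = 8388608 sectors, regardless of how much
 * backing store (sdebug_store_sectors) was actually allocated.
 */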
#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
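/*
 * READ CAPACITY(16) parameter data assembled above: bytes 0-7 highest
 * LBA, bytes 8-11 block length, byte 12 PROT_EN/P_TYPE, byte 13 the
 * physical block exponent, and bytes 14-15 LBPME/LBPRZ plus the lowest
 * aligned LBA split across both bytes.
 */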
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min_t(int, alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
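/*
 * Size check for the data built above: each target port group
 * descriptor is 8 bytes plus 4 bytes per relative port, so two
 * single-port groups come to 2 * 12 = 24 bytes after the 4 byte
 * REPORT TARGET PORT GROUPS header (n == 28).
 */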
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
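/*
 * The all_commands descriptor stride above ("bump") is 8 bytes per
 * command, or 20 when RCTD is set and a 12 byte command timeouts
 * descriptor (length 0xa, placed at offset + 8) follows each entry.
 */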
static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	bool repd;
	u32 alloc_len, len;
	u8 arr[16];
	u8 *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	repd = !!(cmd[2] & 0x80);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
	arr[1] = 0x1;		/* ITNRS */
	if (repd) {
		arr[3] = 0xc;
		len = 16;
	} else
		len = 4;

	len = (len < alloc_len) ? len : alloc_len;
	return fill_from_dev_buffer(scp, arr, len);
}
/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}

static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}

static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	put_unaligned_be16(sdebug_sectors_per, p + 10);
	put_unaligned_be16(sdebug_sector_size, p + 12);
	if (sdebug_removable)
		p[20] |= 0x20; /* should agree with INQUIRY */
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}

static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,	0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}

static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};

	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}

static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}

static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}

static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}

static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
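/*
 * All of the resp_*_pg() helpers above share the pcontrol convention
 * used by MODE SENSE: 0 returns current values, 1 a changeable mask
 * (non-zero only where a field may be altered), 2 default values;
 * pcontrol 3 (saved) is rejected in resp_mode_sense() with
 * SAVING_PARAMS_UNSUP.
 */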
#define SDEBUG_MAX_MSENSE_SZ 256

static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
#define SDEBUG_MAX_MSELECT_SZ 512

static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
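/*
 * Offset arithmetic used above, e.g. a MODE SELECT(10) payload with an
 * 8 byte block descriptor puts the first mode page at
 * off = 8 + 8 = 16; for MODE SELECT(6) the header is 4 bytes, so
 * off = bd_len + 4.
 */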
static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
				     };

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}

static int resp_ie_l_pg(unsigned char *arr)
{
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
				   };

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512

static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode, alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;
	sp = cmd[1] & 0x1;
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
}
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
	return devip->nr_zones != 0;
}

static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	return &devip->zstate[lba >> devip->zsize_shift];
}

static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
}

static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
		return;

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else
		devip->nr_exp_open--;

	if (zsp->z_wp == zsp->z_start) {
		zsp->z_cond = ZC1_EMPTY;
	} else {
		zsp->z_cond = ZC4_CLOSED;
		devip->nr_closed++;
	}
}

static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
			zbc_close_zone(devip, zsp);
			return;
		}
	}
}

static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}

static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (zbc_zone_is_conv(zsp))
		return;

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			n = num;
		}
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
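/*
 * Zone condition transitions driven by the helpers above, roughly:
 * ZC1_EMPTY -> ZC2_IMPLICIT_OPEN or ZC3_EXPLICIT_OPEN on open or first
 * write; open -> ZC4_CLOSED via zbc_close_zone() when the write
 * pointer has advanced (back to ZC1_EMPTY if it has not); and any
 * writable state -> ZC5_FULL once the write pointer reaches the zone
 * end.
 */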
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
/*
 * Note: if BUG_ON() fires it usually indicates a problem with the parser
 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
 * that access any of the "stores" in struct sdeb_store_info should call this
 * function with bug_if_fake_rw set to true.
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
{
	if (sdebug_fake_rw) {
		BUG_ON(bug_if_fake_rw);	/* See note above */
		return NULL;
	}
	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
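/*
 * The rest/wrap logic above treats the store as a ring over
 * sdebug_store_sectors; e.g. with a store of 1000 sectors, block 998
 * and num 5 gives rest = 3, so the first sg_copy_buffer() moves 2
 * sectors at the end of the store and the second wraps the remaining 3
 * to its start.
 */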
/* Returns number of bytes copied or -1 if error. */
static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
{
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;
	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
			      num * sdebug_sector_size, 0, true);
}
/* If sip->storep+lba compares equal to arr(num), then copy top half of
 * arr into sip->storep+lba and return true. If comparison fails then
 * return false. */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (!res)
		return res;
	if (rest)
		res = memcmp(fsp, arr + ((num - rest) * lb_size),
			     rest * lb_size) == 0;
	if (!res)
		return res;
	if (compare_only)
		return true;
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return res;
}
static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
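/*
 * Layout being checked above (struct t10_pi_tuple, 8 bytes per sector):
 * a 2 byte guard tag (CRC or IP checksum depending on sdebug_guard), a
 * 2 byte application tag, and a 4 byte reference tag that must match
 * the low 32 bits of the LBA for type 1 or the expected initial LBA
 * (ei_lba) for type 2 protection.
 */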
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				read_unlock(macc_lckp);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				read_unlock(macc_lckp);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				read_unlock(macc_lckp);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				read_unlock(macc_lckp);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}

static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
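/*
 * Round trip example for the two helpers above with
 * sdebug_unmap_granularity=4 and sdebug_unmap_alignment=1:
 * lba_to_map_index(5) = (5 + 3) / 4 = 2 and map_index_to_lba(2) =
 * 2 * 4 - 3 = 5, so each bit in map_storep covers one aligned
 * granularity-sized extent.
 */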
static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}

static void map_region(struct sdeb_store_info *sip, sector_t lba,
		       unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (index < map_size)
			set_bit(index, sip->map_storep);

		lba = map_index_to_lba(index + 1);
	}
}

static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	write_lock(macc_lckp);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
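/*
 * Data-out layout parsed above: lbdof logical blocks of header space
 * hold a 32 byte parameter list header followed by num_lrd 32 byte LBA
 * range descriptors (8 byte LBA, 4 byte block count); the write data
 * itself starts at sg_off = lbdof * lb_size and descriptors are
 * consumed in order.
 */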
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fs1p;
	u8 *fsp;

	write_lock(macc_lckp);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		write_unlock(&sip->macc_lck);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	write_unlock(macc_lckp);

	return 0;
}
static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u32 lba;
	u16 num;
	u32 ei_lba = 0;
	bool unmap = false;

	if (cmd[1] & 0x8) {
		if (sdebug_lbpws10 == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}

static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba = 0;
	bool unmap = false;
	bool ndob = false;

	if (cmd[1] & 0x8) {	/* UNMAP */
		if (sdebug_lbpws == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
		ndob = true;
	lba = get_unaligned_be64(cmd + 2);
	num = get_unaligned_be32(cmd + 10);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3879 static int resp_comp_write(struct scsi_cmnd *scp,
3880 struct sdebug_dev_info *devip)
3882 u8 *cmd = scp->cmnd;
3884 struct sdeb_store_info *sip = devip2sip(devip, true);
3885 rwlock_t *macc_lckp = &sip->macc_lck;
3888 u32 lb_size = sdebug_sector_size;
3893 lba = get_unaligned_be64(cmd + 2);
3894 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3896 return 0; /* degenerate case, not an error */
3897 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3899 mk_sense_invalid_opcode(scp);
3900 return check_condition_result;
3902 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3903 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3904 (cmd[1] & 0xe0) == 0)
3905 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3907 ret = check_device_access_params(scp, lba, num, false);
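	/* The data-out buffer of COMPARE AND WRITE holds the verify (compare)
	 * data followed by the write data, so twice num blocks (dnum) are
	 * allocated and fetched below. */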
3911 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3913 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3915 return check_condition_result;
3918 write_lock(macc_lckp);
3920 ret = do_dout_fetch(scp, dnum, arr);
3922 retval = DID_ERROR << 16;
3924 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3925 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3926 "indicated=%u, IO sent=%d bytes\n", my_name,
3927 dnum * lb_size, ret);
3928 if (!comp_write_worker(sip, lba, num, arr, false)) {
3929 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3930 retval = check_condition_result;
3933 if (scsi_debug_lbp())
3934 map_region(sip, lba, num);
3936 write_unlock(macc_lckp);
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3947 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3950 struct unmap_block_desc *desc;
3951 struct sdeb_store_info *sip = devip2sip(devip, true);
3952 rwlock_t *macc_lckp = &sip->macc_lck;
3953 unsigned int i, payload_len, descriptors;
3956 if (!scsi_debug_lbp())
return 0; /* fib and say it's done */
3958 payload_len = get_unaligned_be16(scp->cmnd + 7);
3959 BUG_ON(scsi_bufflen(scp) != payload_len);
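	/* UNMAP parameter list: an 8 byte header followed by 16 byte block
	 * descriptors, each holding an 8 byte LBA and a 4 byte block count. */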
3961 descriptors = (payload_len - 8) / 16;
3962 if (descriptors > sdebug_unmap_max_desc) {
3963 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3964 return check_condition_result;
3967 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3969 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3971 return check_condition_result;
3974 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3976 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3977 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3979 desc = (void *)&buf[8];
3981 write_lock(macc_lckp);
3983 for (i = 0 ; i < descriptors ; i++) {
3984 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3985 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3987 ret = check_device_access_params(scp, lba, num, true);
3991 unmap_region(sip, lba, num);
3997 write_unlock(macc_lckp);
4003 #define SDEBUG_GET_LBA_STATUS_LEN 32
4005 static int resp_get_lba_status(struct scsi_cmnd *scp,
4006 struct sdebug_dev_info *devip)
4008 u8 *cmd = scp->cmnd;
4010 u32 alloc_len, mapped, num;
4012 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4014 lba = get_unaligned_be64(cmd + 2);
4015 alloc_len = get_unaligned_be32(cmd + 10);
4020 ret = check_device_access_params(scp, lba, 1, false);
4024 if (scsi_debug_lbp()) {
4025 struct sdeb_store_info *sip = devip2sip(devip, true);
4027 mapped = map_state(sip, lba, &num);
4030 /* following just in case virtual_gb changed */
4031 sdebug_capacity = get_sdebug_capacity();
4032 if (sdebug_capacity - lba <= 0xffffffff)
4033 num = sdebug_capacity - lba;
4038 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4039 put_unaligned_be32(20, arr); /* Parameter Data Length */
4040 put_unaligned_be64(lba, arr + 8); /* LBA */
4041 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4042 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4044 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4047 static int resp_sync_cache(struct scsi_cmnd *scp,
4048 struct sdebug_dev_info *devip)
4053 u8 *cmd = scp->cmnd;
4055 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4056 lba = get_unaligned_be32(cmd + 2);
4057 num_blocks = get_unaligned_be16(cmd + 7);
4058 } else { /* SYNCHRONIZE_CACHE(16) */
4059 lba = get_unaligned_be64(cmd + 2);
4060 num_blocks = get_unaligned_be32(cmd + 10);
4062 if (lba + num_blocks > sdebug_capacity) {
4063 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4064 return check_condition_result;
4066 if (!write_since_sync || (cmd[1] & 0x2))
4067 res = SDEG_RES_IMMED_MASK;
4068 else /* delay if write_since_sync and IMMED clear */
4069 write_since_sync = false;
 * Assuming the LBA+num_blocks is not out-of-range, this function returns
 * CONDITION MET if the specified blocks will fit (or already are) in the
 * cache, and GOOD status otherwise. We model a disk with a big cache and
 * therefore always yield CONDITION MET. As a side effect it actually
 * tries to bring the range of main memory into the cache associated with
 * the CPU(s).
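 *
 * Note: prefetch_range() used below is only a hint to the CPU's hardware
 * prefetcher; it does not change the SCSI-level response.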
4080 static int resp_pre_fetch(struct scsi_cmnd *scp,
4081 struct sdebug_dev_info *devip)
4085 u64 block, rest = 0;
4087 u8 *cmd = scp->cmnd;
4088 struct sdeb_store_info *sip = devip2sip(devip, true);
4089 rwlock_t *macc_lckp = &sip->macc_lck;
4090 u8 *fsp = sip->storep;
4092 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4093 lba = get_unaligned_be32(cmd + 2);
4094 nblks = get_unaligned_be16(cmd + 7);
4095 } else { /* PRE-FETCH(16) */
4096 lba = get_unaligned_be64(cmd + 2);
4097 nblks = get_unaligned_be32(cmd + 10);
4099 if (lba + nblks > sdebug_capacity) {
4100 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4101 return check_condition_result;
4105 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4106 block = do_div(lba, sdebug_store_sectors);
4107 if (block + nblks > sdebug_store_sectors)
4108 rest = block + nblks - sdebug_store_sectors;
4110 /* Try to bring the PRE-FETCH range into CPU's cache */
4111 read_lock(macc_lckp);
4112 prefetch_range(fsp + (sdebug_sector_size * block),
4113 (nblks - rest) * sdebug_sector_size);
4115 prefetch_range(fsp, rest * sdebug_sector_size);
4116 read_unlock(macc_lckp);
4119 res = SDEG_RES_IMMED_MASK;
4120 return res | condition_met_result;
4123 #define RL_BUCKET_ELEMS 8
4125 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4126 * (W-LUN), the normal Linux scanning logic does not associate it with a
4127 * device (e.g. /dev/sg7). The following magic will make that association:
4128 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4129 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN with each target. To get a W-LUN for
 * target 2 only, use "echo '- 2 49409' > scan".
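 * (49409 is SCSI_W_LUN_REPORT_LUNS, 0xc101, in decimal.)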
4133 static int resp_report_luns(struct scsi_cmnd *scp,
4134 struct sdebug_dev_info *devip)
4136 unsigned char *cmd = scp->cmnd;
4137 unsigned int alloc_len;
4138 unsigned char select_report;
4140 struct scsi_lun *lun_p;
4141 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4142 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4143 unsigned int wlun_cnt; /* report luns W-LUN count */
4144 unsigned int tlun_cnt; /* total LUN count */
4145 unsigned int rlen; /* response length (in bytes) */
4147 unsigned int off_rsp = 0;
4148 const int sz_lun = sizeof(struct scsi_lun);
4150 clear_luns_changed_on_target(devip);
4152 select_report = cmd[2];
4153 alloc_len = get_unaligned_be32(cmd + 6);
4155 if (alloc_len < 4) {
4156 pr_err("alloc len too small %d\n", alloc_len);
4157 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4158 return check_condition_result;
4161 switch (select_report) {
4162 case 0: /* all LUNs apart from W-LUNs */
4163 lun_cnt = sdebug_max_luns;
4166 case 1: /* only W-LUNs */
4170 case 2: /* all LUNs */
4171 lun_cnt = sdebug_max_luns;
4174 case 0x10: /* only administrative LUs */
4175 case 0x11: /* see SPC-5 */
case 0x12: /* only subsidiary LUs owned by referenced LU */
4178 pr_debug("select report invalid %d\n", select_report);
4179 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4180 return check_condition_result;
4183 if (sdebug_no_lun_0 && (lun_cnt > 0))
4186 tlun_cnt = lun_cnt + wlun_cnt;
4187 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4188 scsi_set_resid(scp, scsi_bufflen(scp));
4189 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4190 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
/* the loops below rely on the response header and a LUN entry being the
 * same size (8 bytes each) */
4193 lun = sdebug_no_lun_0 ? 1 : 0;
4194 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4195 memset(arr, 0, sizeof(arr));
4196 lun_p = (struct scsi_lun *)&arr[0];
4198 put_unaligned_be32(rlen, &arr[0]);
4202 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4203 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4205 int_to_scsilun(lun++, lun_p);
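/* flat space addressing: address method field (byte 0 bits 7:6) = 01b */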
4206 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4207 lun_p->scsi_lun[0] |= 0x40;
4209 if (j < RL_BUCKET_ELEMS)
4212 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4218 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4222 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4226 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4228 bool is_bytchk3 = false;
4231 u32 vnum, a_num, off;
4232 const u32 lb_size = sdebug_sector_size;
4235 u8 *cmd = scp->cmnd;
4236 struct sdeb_store_info *sip = devip2sip(devip, true);
4237 rwlock_t *macc_lckp = &sip->macc_lck;
4239 bytchk = (cmd[1] >> 1) & 0x3;
4241 return 0; /* always claim internal verify okay */
4242 } else if (bytchk == 2) {
4243 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4244 return check_condition_result;
4245 } else if (bytchk == 3) {
4246 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4250 lba = get_unaligned_be64(cmd + 2);
4251 vnum = get_unaligned_be32(cmd + 10);
4253 case VERIFY: /* is VERIFY(10) */
4254 lba = get_unaligned_be32(cmd + 2);
4255 vnum = get_unaligned_be16(cmd + 7);
4258 mk_sense_invalid_opcode(scp);
4259 return check_condition_result;
4261 a_num = is_bytchk3 ? 1 : vnum;
4262 /* Treat following check like one for read (i.e. no write) access */
4263 ret = check_device_access_params(scp, lba, a_num, false);
4267 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4269 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4271 return check_condition_result;
4273 /* Not changing store, so only need read access */
4274 read_lock(macc_lckp);
4276 ret = do_dout_fetch(scp, a_num, arr);
4278 ret = DID_ERROR << 16;
4280 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4281 sdev_printk(KERN_INFO, scp->device,
4282 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4283 my_name, __func__, a_num * lb_size, ret);
4286 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4287 memcpy(arr + off, arr, lb_size);
4290 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4291 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4292 ret = check_condition_result;
4296 read_unlock(macc_lckp);
4301 #define RZONES_DESC_HD 64
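/* Both the response header and each zone descriptor are 64 bytes long. */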
/* Report zones depending on start LBA and reporting options */
4304 static int resp_report_zones(struct scsi_cmnd *scp,
4305 struct sdebug_dev_info *devip)
4307 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4309 u32 alloc_len, rep_opts, rep_len;
4312 u8 *arr = NULL, *desc;
4313 u8 *cmd = scp->cmnd;
4314 struct sdeb_zone_state *zsp;
4315 struct sdeb_store_info *sip = devip2sip(devip, false);
4316 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4318 if (!sdebug_dev_is_zoned(devip)) {
4319 mk_sense_invalid_opcode(scp);
4320 return check_condition_result;
4322 zs_lba = get_unaligned_be64(cmd + 2);
4323 alloc_len = get_unaligned_be32(cmd + 10);
4324 rep_opts = cmd[14] & 0x3f;
4325 partial = cmd[14] & 0x80;
4327 if (zs_lba >= sdebug_capacity) {
4328 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4329 return check_condition_result;
4332 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4333 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4336 arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4338 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4340 return check_condition_result;
4343 read_lock(macc_lckp);
4346 for (i = 0; i < max_zones; i++) {
4347 lba = zs_lba + devip->zsize * i;
4348 if (lba > sdebug_capacity)
4350 zsp = zbc_zone(devip, lba);
4357 if (zsp->z_cond != ZC1_EMPTY)
4361 /* Implicit open zones */
4362 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4366 /* Explicit open zones */
4367 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4372 if (zsp->z_cond != ZC4_CLOSED)
4377 if (zsp->z_cond != ZC5_FULL)
4384 * Read-only, offline, reset WP recommended are
4385 * not emulated: no zones to report;
4389 /* non-seq-resource set */
4390 if (!zsp->z_non_seq_resource)
4394 /* Not write pointer (conventional) zones */
4395 if (!zbc_zone_is_conv(zsp))
4399 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4400 INVALID_FIELD_IN_CDB, 0);
4401 ret = check_condition_result;
4405 if (nrz < rep_max_zones) {
4406 /* Fill zone descriptor */
4407 desc[0] = zsp->z_type;
4408 desc[1] = zsp->z_cond << 4;
4409 if (zsp->z_non_seq_resource)
4411 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4412 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4413 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4417 if (partial && nrz >= rep_max_zones)
4424 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4425 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4427 rep_len = (unsigned long)desc - (unsigned long)arr;
4428 ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4431 read_unlock(macc_lckp);
4436 /* Logic transplanted from tcmu-runner, file_zbc.c */
4437 static void zbc_open_all(struct sdebug_dev_info *devip)
4439 struct sdeb_zone_state *zsp = &devip->zstate[0];
4442 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4443 if (zsp->z_cond == ZC4_CLOSED)
4444 zbc_open_zone(devip, &devip->zstate[i], true);
4448 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4452 enum sdebug_z_cond zc;
4453 u8 *cmd = scp->cmnd;
4454 struct sdeb_zone_state *zsp;
4455 bool all = cmd[14] & 0x01;
4456 struct sdeb_store_info *sip = devip2sip(devip, false);
4457 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4459 if (!sdebug_dev_is_zoned(devip)) {
4460 mk_sense_invalid_opcode(scp);
4461 return check_condition_result;
4464 write_lock(macc_lckp);
/* Check if all closed zones can be opened */
4468 if (devip->max_open &&
4469 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4470 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4472 res = check_condition_result;
4475 /* Open all closed zones */
4476 zbc_open_all(devip);
4480 /* Open the specified zone */
4481 z_id = get_unaligned_be64(cmd + 2);
4482 if (z_id >= sdebug_capacity) {
4483 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4484 res = check_condition_result;
4488 zsp = zbc_zone(devip, z_id);
4489 if (z_id != zsp->z_start) {
4490 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4491 res = check_condition_result;
4494 if (zbc_zone_is_conv(zsp)) {
4495 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4496 res = check_condition_result;
4501 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4504 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4505 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4507 res = check_condition_result;
4511 zbc_open_zone(devip, zsp, true);
4513 write_unlock(macc_lckp);
4517 static void zbc_close_all(struct sdebug_dev_info *devip)
4521 for (i = 0; i < devip->nr_zones; i++)
4522 zbc_close_zone(devip, &devip->zstate[i]);
4525 static int resp_close_zone(struct scsi_cmnd *scp,
4526 struct sdebug_dev_info *devip)
4530 u8 *cmd = scp->cmnd;
4531 struct sdeb_zone_state *zsp;
4532 bool all = cmd[14] & 0x01;
4533 struct sdeb_store_info *sip = devip2sip(devip, false);
4534 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4536 if (!sdebug_dev_is_zoned(devip)) {
4537 mk_sense_invalid_opcode(scp);
4538 return check_condition_result;
4541 write_lock(macc_lckp);
4544 zbc_close_all(devip);
4548 /* Close specified zone */
4549 z_id = get_unaligned_be64(cmd + 2);
4550 if (z_id >= sdebug_capacity) {
4551 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4552 res = check_condition_result;
4556 zsp = zbc_zone(devip, z_id);
4557 if (z_id != zsp->z_start) {
4558 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4559 res = check_condition_result;
4562 if (zbc_zone_is_conv(zsp)) {
4563 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4564 res = check_condition_result;
4568 zbc_close_zone(devip, zsp);
4570 write_unlock(macc_lckp);
4574 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4575 struct sdeb_zone_state *zsp, bool empty)
4577 enum sdebug_z_cond zc = zsp->z_cond;
4579 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4580 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4581 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4582 zbc_close_zone(devip, zsp);
4583 if (zsp->z_cond == ZC4_CLOSED)
4585 zsp->z_wp = zsp->z_start + zsp->z_size;
4586 zsp->z_cond = ZC5_FULL;
4590 static void zbc_finish_all(struct sdebug_dev_info *devip)
4594 for (i = 0; i < devip->nr_zones; i++)
4595 zbc_finish_zone(devip, &devip->zstate[i], false);
4598 static int resp_finish_zone(struct scsi_cmnd *scp,
4599 struct sdebug_dev_info *devip)
4601 struct sdeb_zone_state *zsp;
4604 u8 *cmd = scp->cmnd;
4605 bool all = cmd[14] & 0x01;
4606 struct sdeb_store_info *sip = devip2sip(devip, false);
4607 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4609 if (!sdebug_dev_is_zoned(devip)) {
4610 mk_sense_invalid_opcode(scp);
4611 return check_condition_result;
4614 write_lock(macc_lckp);
4617 zbc_finish_all(devip);
4621 /* Finish the specified zone */
4622 z_id = get_unaligned_be64(cmd + 2);
4623 if (z_id >= sdebug_capacity) {
4624 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4625 res = check_condition_result;
4629 zsp = zbc_zone(devip, z_id);
4630 if (z_id != zsp->z_start) {
4631 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4632 res = check_condition_result;
4635 if (zbc_zone_is_conv(zsp)) {
4636 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4637 res = check_condition_result;
4641 zbc_finish_zone(devip, zsp, true);
4643 write_unlock(macc_lckp);
4647 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4648 struct sdeb_zone_state *zsp)
4650 enum sdebug_z_cond zc;
4652 if (zbc_zone_is_conv(zsp))
4656 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4657 zbc_close_zone(devip, zsp);
4659 if (zsp->z_cond == ZC4_CLOSED)
4662 zsp->z_non_seq_resource = false;
4663 zsp->z_wp = zsp->z_start;
4664 zsp->z_cond = ZC1_EMPTY;
4667 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4671 for (i = 0; i < devip->nr_zones; i++)
4672 zbc_rwp_zone(devip, &devip->zstate[i]);
4675 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4677 struct sdeb_zone_state *zsp;
4680 u8 *cmd = scp->cmnd;
4681 bool all = cmd[14] & 0x01;
4682 struct sdeb_store_info *sip = devip2sip(devip, false);
4683 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4685 if (!sdebug_dev_is_zoned(devip)) {
4686 mk_sense_invalid_opcode(scp);
4687 return check_condition_result;
4690 write_lock(macc_lckp);
4697 z_id = get_unaligned_be64(cmd + 2);
4698 if (z_id >= sdebug_capacity) {
4699 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4700 res = check_condition_result;
4704 zsp = zbc_zone(devip, z_id);
4705 if (z_id != zsp->z_start) {
4706 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4707 res = check_condition_result;
4710 if (zbc_zone_is_conv(zsp)) {
4711 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4712 res = check_condition_result;
4716 zbc_rwp_zone(devip, zsp);
4718 write_unlock(macc_lckp);
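/* Pick the submission queue for a command from its blk-mq hardware queue index. */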
4722 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4725 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4727 hwq = blk_mq_unique_tag_to_hwq(tag);
4729 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4730 if (WARN_ON_ONCE(hwq >= submit_queues))
4733 return sdebug_q_arr + hwq;
4736 static u32 get_tag(struct scsi_cmnd *cmnd)
4738 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4741 /* Queued (deferred) command completions converge here. */
4742 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4744 bool aborted = sd_dp->aborted;
4747 unsigned long iflags;
4748 struct sdebug_queue *sqp;
4749 struct sdebug_queued_cmd *sqcp;
4750 struct scsi_cmnd *scp;
4751 struct sdebug_dev_info *devip;
4753 if (unlikely(aborted))
4754 sd_dp->aborted = false;
4755 qc_idx = sd_dp->qc_idx;
4756 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4757 if (sdebug_statistics) {
4758 atomic_inc(&sdebug_completions);
4759 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4760 atomic_inc(&sdebug_miss_cpus);
4762 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4763 pr_err("wild qc_idx=%d\n", qc_idx);
4766 spin_lock_irqsave(&sqp->qc_lock, iflags);
4767 sd_dp->defer_t = SDEB_DEFER_NONE;
4768 sqcp = &sqp->qc_arr[qc_idx];
4770 if (unlikely(scp == NULL)) {
4771 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4772 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4773 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4776 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4778 atomic_dec(&devip->num_in_q);
4780 pr_err("devip=NULL\n");
4781 if (unlikely(atomic_read(&retired_max_queue) > 0))
4784 sqcp->a_cmnd = NULL;
4785 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4786 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4787 pr_err("Unexpected completion\n");
4791 if (unlikely(retiring)) { /* user has reduced max_queue */
4794 retval = atomic_read(&retired_max_queue);
4795 if (qc_idx >= retval) {
4796 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4797 pr_err("index %d too large\n", retval);
4800 k = find_last_bit(sqp->in_use_bm, retval);
4801 if ((k < sdebug_max_queue) || (k == retval))
4802 atomic_set(&retired_max_queue, 0);
4804 atomic_set(&retired_max_queue, k + 1);
4806 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4807 if (unlikely(aborted)) {
4809 pr_info("bypassing scsi_done() due to aborted cmd\n");
4812 scp->scsi_done(scp); /* callback to mid level */
4815 /* When high resolution timer goes off this function is called. */
4816 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4818 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4820 sdebug_q_cmd_complete(sd_dp);
4821 return HRTIMER_NORESTART;
4824 /* When work queue schedules work, it calls this function. */
4825 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4827 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4829 sdebug_q_cmd_complete(sd_dp);
4832 static bool got_shared_uuid;
4833 static uuid_t shared_uuid;
4835 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4837 struct sdeb_zone_state *zsp;
4838 sector_t capacity = get_sdebug_capacity();
4839 sector_t zstart = 0;
4843 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4844 * a zone size allowing for at least 4 zones on the device. Otherwise,
4845 * use the specified zone size checking that at least 2 zones can be
4846 * created for the device.
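 *
 * E.g. with 512 byte logical blocks, the starting zone size is
 * DEF_ZBC_ZONE_SIZE_MB * 2048 blocks; it is then halved until at least
 * four zones fit within the device capacity.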
4848 if (!sdeb_zbc_zone_size_mb) {
4849 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4850 >> ilog2(sdebug_sector_size);
4851 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4853 if (devip->zsize < 2) {
4854 pr_err("Device capacity too small\n");
4858 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4859 pr_err("Zone size is not a power of 2\n");
4862 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4863 >> ilog2(sdebug_sector_size);
4864 if (devip->zsize >= capacity) {
4865 pr_err("Zone size too large for device capacity\n");
4870 devip->zsize_shift = ilog2(devip->zsize);
4871 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4873 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4874 pr_err("Number of conventional zones too large\n");
4877 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4879 if (devip->zmodel == BLK_ZONED_HM) {
4880 /* zbc_max_open_zones can be 0, meaning "not reported" */
4881 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4882 devip->max_open = (devip->nr_zones - 1) / 2;
4884 devip->max_open = sdeb_zbc_max_open;
4887 devip->zstate = kcalloc(devip->nr_zones,
4888 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4892 for (i = 0; i < devip->nr_zones; i++) {
4893 zsp = &devip->zstate[i];
4895 zsp->z_start = zstart;
4897 if (i < devip->nr_conv_zones) {
4898 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4899 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4900 zsp->z_wp = (sector_t)-1;
4902 if (devip->zmodel == BLK_ZONED_HM)
4903 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4905 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4906 zsp->z_cond = ZC1_EMPTY;
4907 zsp->z_wp = zsp->z_start;
4910 if (zsp->z_start + devip->zsize < capacity)
4911 zsp->z_size = devip->zsize;
4913 zsp->z_size = capacity - zsp->z_start;
4915 zstart += zsp->z_size;
4921 static struct sdebug_dev_info *sdebug_device_create(
4922 struct sdebug_host_info *sdbg_host, gfp_t flags)
4924 struct sdebug_dev_info *devip;
4926 devip = kzalloc(sizeof(*devip), flags);
4928 if (sdebug_uuid_ctl == 1)
4929 uuid_gen(&devip->lu_name);
4930 else if (sdebug_uuid_ctl == 2) {
4931 if (got_shared_uuid)
4932 devip->lu_name = shared_uuid;
4934 uuid_gen(&shared_uuid);
4935 got_shared_uuid = true;
4936 devip->lu_name = shared_uuid;
4939 devip->sdbg_host = sdbg_host;
4940 if (sdeb_zbc_in_use) {
4941 devip->zmodel = sdeb_zbc_model;
4942 if (sdebug_device_create_zones(devip)) {
4947 devip->zmodel = BLK_ZONED_NONE;
4949 devip->sdbg_host = sdbg_host;
4950 devip->create_ts = ktime_get_boottime();
4951 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4952 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4957 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4959 struct sdebug_host_info *sdbg_host;
4960 struct sdebug_dev_info *open_devip = NULL;
4961 struct sdebug_dev_info *devip;
4963 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4965 pr_err("Host info NULL\n");
4969 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4970 if ((devip->used) && (devip->channel == sdev->channel) &&
4971 (devip->target == sdev->id) &&
4972 (devip->lun == sdev->lun))
4975 if ((!devip->used) && (!open_devip))
4979 if (!open_devip) { /* try and make a new one */
4980 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4982 pr_err("out of memory at line %d\n", __LINE__);
4987 open_devip->channel = sdev->channel;
4988 open_devip->target = sdev->id;
4989 open_devip->lun = sdev->lun;
4990 open_devip->sdbg_host = sdbg_host;
4991 atomic_set(&open_devip->num_in_q, 0);
4992 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4993 open_devip->used = true;
4997 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5000 pr_info("slave_alloc <%u %u %u %llu>\n",
5001 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5005 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5007 struct sdebug_dev_info *devip =
5008 (struct sdebug_dev_info *)sdp->hostdata;
5011 pr_info("slave_configure <%u %u %u %llu>\n",
5012 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5013 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5014 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5015 if (devip == NULL) {
5016 devip = find_build_dev_info(sdp);
5018 return 1; /* no resources, will be marked offline */
5020 sdp->hostdata = devip;
5022 sdp->no_uld_attach = 1;
5023 config_cdb_len(sdp);
5027 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5029 struct sdebug_dev_info *devip =
5030 (struct sdebug_dev_info *)sdp->hostdata;
5033 pr_info("slave_destroy <%u %u %u %llu>\n",
5034 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5036 /* make this slot available for re-use */
5037 devip->used = false;
5038 sdp->hostdata = NULL;
5042 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5043 enum sdeb_defer_type defer_t)
5047 if (defer_t == SDEB_DEFER_HRT)
5048 hrtimer_cancel(&sd_dp->hrt);
5049 else if (defer_t == SDEB_DEFER_WQ)
5050 cancel_work_sync(&sd_dp->ew.work);
/* If @cmnd is found, deletes its timer or work queue and returns true;
 * else returns false. */
5055 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5057 unsigned long iflags;
5058 int j, k, qmax, r_qmax;
5059 enum sdeb_defer_type l_defer_t;
5060 struct sdebug_queue *sqp;
5061 struct sdebug_queued_cmd *sqcp;
5062 struct sdebug_dev_info *devip;
5063 struct sdebug_defer *sd_dp;
5065 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5066 spin_lock_irqsave(&sqp->qc_lock, iflags);
5067 qmax = sdebug_max_queue;
5068 r_qmax = atomic_read(&retired_max_queue);
5071 for (k = 0; k < qmax; ++k) {
5072 if (test_bit(k, sqp->in_use_bm)) {
5073 sqcp = &sqp->qc_arr[k];
5074 if (cmnd != sqcp->a_cmnd)
5077 devip = (struct sdebug_dev_info *)
5078 cmnd->device->hostdata;
5080 atomic_dec(&devip->num_in_q);
5081 sqcp->a_cmnd = NULL;
5082 sd_dp = sqcp->sd_dp;
5084 l_defer_t = sd_dp->defer_t;
5085 sd_dp->defer_t = SDEB_DEFER_NONE;
5087 l_defer_t = SDEB_DEFER_NONE;
5088 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5089 stop_qc_helper(sd_dp, l_defer_t);
5090 clear_bit(k, sqp->in_use_bm);
5094 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5099 /* Deletes (stops) timers or work queues of all queued commands */
5100 static void stop_all_queued(void)
5102 unsigned long iflags;
5104 enum sdeb_defer_type l_defer_t;
5105 struct sdebug_queue *sqp;
5106 struct sdebug_queued_cmd *sqcp;
5107 struct sdebug_dev_info *devip;
5108 struct sdebug_defer *sd_dp;
5110 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5111 spin_lock_irqsave(&sqp->qc_lock, iflags);
5112 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5113 if (test_bit(k, sqp->in_use_bm)) {
5114 sqcp = &sqp->qc_arr[k];
5115 if (sqcp->a_cmnd == NULL)
5117 devip = (struct sdebug_dev_info *)
5118 sqcp->a_cmnd->device->hostdata;
5120 atomic_dec(&devip->num_in_q);
5121 sqcp->a_cmnd = NULL;
5122 sd_dp = sqcp->sd_dp;
5124 l_defer_t = sd_dp->defer_t;
5125 sd_dp->defer_t = SDEB_DEFER_NONE;
5127 l_defer_t = SDEB_DEFER_NONE;
5128 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5129 stop_qc_helper(sd_dp, l_defer_t);
5130 clear_bit(k, sqp->in_use_bm);
5131 spin_lock_irqsave(&sqp->qc_lock, iflags);
5134 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5138 /* Free queued command memory on heap */
5139 static void free_all_queued(void)
5142 struct sdebug_queue *sqp;
5143 struct sdebug_queued_cmd *sqcp;
5145 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5146 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5147 sqcp = &sqp->qc_arr[k];
5154 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5160 ok = stop_queued_cmnd(SCpnt);
5161 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5162 sdev_printk(KERN_INFO, SCpnt->device,
5163 "%s: command%s found\n", __func__,
5169 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5172 if (SCpnt && SCpnt->device) {
5173 struct scsi_device *sdp = SCpnt->device;
5174 struct sdebug_dev_info *devip =
5175 (struct sdebug_dev_info *)sdp->hostdata;
5177 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5178 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5180 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5185 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5187 struct sdebug_host_info *sdbg_host;
5188 struct sdebug_dev_info *devip;
5189 struct scsi_device *sdp;
5190 struct Scsi_Host *hp;
5193 ++num_target_resets;
5196 sdp = SCpnt->device;
5199 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5200 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5204 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5206 list_for_each_entry(devip,
5207 &sdbg_host->dev_info_list,
5209 if (devip->target == sdp->id) {
5210 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5214 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5215 sdev_printk(KERN_INFO, sdp,
5216 "%s: %d device(s) found in target\n", __func__, k);
5221 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5223 struct sdebug_host_info *sdbg_host;
5224 struct sdebug_dev_info *devip;
5225 struct scsi_device *sdp;
5226 struct Scsi_Host *hp;
5230 if (!(SCpnt && SCpnt->device))
5232 sdp = SCpnt->device;
5233 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5234 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5237 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5239 list_for_each_entry(devip,
5240 &sdbg_host->dev_info_list,
5242 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5247 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5248 sdev_printk(KERN_INFO, sdp,
5249 "%s: %d device(s) found in host\n", __func__, k);
5254 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5256 struct sdebug_host_info *sdbg_host;
5257 struct sdebug_dev_info *devip;
5261 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5262 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5263 spin_lock(&sdebug_host_list_lock);
5264 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5265 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5267 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5271 spin_unlock(&sdebug_host_list_lock);
5273 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5274 sdev_printk(KERN_INFO, SCpnt->device,
5275 "%s: %d device(s) found\n", __func__, k);
5279 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5281 struct msdos_partition *pp;
5282 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5283 int sectors_per_part, num_sectors, k;
5284 int heads_by_sects, start_sec, end_sec;
5286 /* assume partition table already zeroed */
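	/* Lay out up to SDEBUG_MAX_PARTS roughly equal partitions; each start
	 * is rounded to a cylinder (heads * sectors-per-track) boundary and
	 * offset by one track to leave room for this partition table. */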
5287 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5289 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5290 sdebug_num_parts = SDEBUG_MAX_PARTS;
5291 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5293 num_sectors = (int)get_sdebug_capacity();
5294 sectors_per_part = (num_sectors - sdebug_sectors_per)
5296 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5297 starts[0] = sdebug_sectors_per;
5298 max_part_secs = sectors_per_part;
5299 for (k = 1; k < sdebug_num_parts; ++k) {
5300 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5302 if (starts[k] - starts[k - 1] < max_part_secs)
5303 max_part_secs = starts[k] - starts[k - 1];
5305 starts[sdebug_num_parts] = num_sectors;
5306 starts[sdebug_num_parts + 1] = 0;
5308 ramp[510] = 0x55; /* magic partition markings */
5310 pp = (struct msdos_partition *)(ramp + 0x1be);
5311 for (k = 0; starts[k + 1]; ++k, ++pp) {
5312 start_sec = starts[k];
5313 end_sec = starts[k] + max_part_secs - 1;
5316 pp->cyl = start_sec / heads_by_sects;
5317 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5318 / sdebug_sectors_per;
5319 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5321 pp->end_cyl = end_sec / heads_by_sects;
5322 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5323 / sdebug_sectors_per;
5324 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5326 pp->start_sect = cpu_to_le32(start_sec);
5327 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5328 pp->sys_ind = 0x83; /* plain Linux partition */
5332 static void block_unblock_all_queues(bool block)
5335 struct sdebug_queue *sqp;
5337 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5338 atomic_set(&sqp->blocked, (int)block);
5341 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5342 * commands will be processed normally before triggers occur.
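 * E.g. with every_nth=100 and a current count of 250, the count is reset
 * to 200, so 99 commands complete normally before the next trigger.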
5344 static void tweak_cmnd_count(void)
5348 modulo = abs(sdebug_every_nth);
5351 block_unblock_all_queues(true);
5352 count = atomic_read(&sdebug_cmnd_count);
5353 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5354 block_unblock_all_queues(false);
5357 static void clear_queue_stats(void)
5359 atomic_set(&sdebug_cmnd_count, 0);
5360 atomic_set(&sdebug_completions, 0);
5361 atomic_set(&sdebug_miss_cpus, 0);
5362 atomic_set(&sdebug_a_tsf, 0);
5365 static bool inject_on_this_cmd(void)
5367 if (sdebug_every_nth == 0)
5369 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5372 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5374 /* Complete the processing of the thread that queued a SCSI command to this
5375 * driver. It either completes the command by calling cmnd_done() or
 * schedules an hrtimer or work queue and then returns 0. Returns
5377 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
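 * Broadly, as implemented below: delta_jiff > 0 or ndelay > 0 arms an
 * hrtimer (or marks the command for poll completion on REQ_HIPRI
 * requests); delta_jiff < 0 defers completion to a work queue; a zero
 * delta_jiff responds directly in the submitting thread.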
5379 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5381 int (*pfp)(struct scsi_cmnd *,
5382 struct sdebug_dev_info *),
5383 int delta_jiff, int ndelay)
5386 bool inject = false;
5387 bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
5388 int k, num_in_q, qdepth;
5389 unsigned long iflags;
5390 u64 ns_from_boot = 0;
5391 struct sdebug_queue *sqp;
5392 struct sdebug_queued_cmd *sqcp;
5393 struct scsi_device *sdp;
5394 struct sdebug_defer *sd_dp;
5396 if (unlikely(devip == NULL)) {
5397 if (scsi_result == 0)
5398 scsi_result = DID_NO_CONNECT << 16;
5399 goto respond_in_thread;
5403 if (delta_jiff == 0)
5404 goto respond_in_thread;
5406 sqp = get_queue(cmnd);
5407 spin_lock_irqsave(&sqp->qc_lock, iflags);
5408 if (unlikely(atomic_read(&sqp->blocked))) {
5409 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5410 return SCSI_MLQUEUE_HOST_BUSY;
5412 num_in_q = atomic_read(&devip->num_in_q);
5413 qdepth = cmnd->device->queue_depth;
5414 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5416 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5417 goto respond_in_thread;
5419 scsi_result = device_qfull_result;
5420 } else if (unlikely(sdebug_every_nth &&
5421 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5422 (scsi_result == 0))) {
5423 if ((num_in_q == (qdepth - 1)) &&
5424 (atomic_inc_return(&sdebug_a_tsf) >=
5425 abs(sdebug_every_nth))) {
5426 atomic_set(&sdebug_a_tsf, 0);
5428 scsi_result = device_qfull_result;
5432 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5433 if (unlikely(k >= sdebug_max_queue)) {
5434 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5436 goto respond_in_thread;
5437 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5438 scsi_result = device_qfull_result;
5439 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5440 sdev_printk(KERN_INFO, sdp,
5441 "%s: max_queue=%d exceeded, %s\n",
5442 __func__, sdebug_max_queue,
5443 (scsi_result ? "status: TASK SET FULL" :
5444 "report: host busy"));
5446 goto respond_in_thread;
5448 return SCSI_MLQUEUE_HOST_BUSY;
5450 set_bit(k, sqp->in_use_bm);
5451 atomic_inc(&devip->num_in_q);
5452 sqcp = &sqp->qc_arr[k];
5453 sqcp->a_cmnd = cmnd;
5454 cmnd->host_scribble = (unsigned char *)sqcp;
5455 sd_dp = sqcp->sd_dp;
5456 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5459 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5461 atomic_dec(&devip->num_in_q);
5462 clear_bit(k, sqp->in_use_bm);
5463 return SCSI_MLQUEUE_HOST_BUSY;
5470 /* Set the hostwide tag */
5471 if (sdebug_host_max_queue)
5472 sd_dp->hc_idx = get_tag(cmnd);
5475 ns_from_boot = ktime_get_boottime_ns();
5477 /* one of the resp_*() response functions is called here */
5478 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5479 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5480 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5481 delta_jiff = ndelay = 0;
5483 if (cmnd->result == 0 && scsi_result != 0)
5484 cmnd->result = scsi_result;
5485 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5486 if (atomic_read(&sdeb_inject_pending)) {
5487 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5488 atomic_set(&sdeb_inject_pending, 0);
5489 cmnd->result = check_condition_result;
5493 if (unlikely(sdebug_verbose && cmnd->result))
5494 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5495 __func__, cmnd->result);
5497 if (delta_jiff > 0 || ndelay > 0) {
5500 if (delta_jiff > 0) {
5501 u64 ns = jiffies_to_nsecs(delta_jiff);
5503 if (sdebug_random && ns < U32_MAX) {
5504 ns = prandom_u32_max((u32)ns);
5505 } else if (sdebug_random) {
5506 ns >>= 12; /* scale to 4 usec precision */
5507 if (ns < U32_MAX) /* over 4 hours max */
5508 ns = prandom_u32_max((u32)ns);
5511 kt = ns_to_ktime(ns);
5512 } else { /* ndelay has a 4.2 second max */
5513 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5515 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5516 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5518 if (kt <= d) { /* elapsed duration >= kt */
5519 spin_lock_irqsave(&sqp->qc_lock, iflags);
5520 sqcp->a_cmnd = NULL;
5521 atomic_dec(&devip->num_in_q);
5522 clear_bit(k, sqp->in_use_bm);
5523 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5526 /* call scsi_done() from this thread */
5527 cmnd->scsi_done(cmnd);
5530 /* otherwise reduce kt by elapsed time */
5535 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5536 spin_lock_irqsave(&sqp->qc_lock, iflags);
5537 if (!sd_dp->init_poll) {
5538 sd_dp->init_poll = true;
5539 sqcp->sd_dp = sd_dp;
5540 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5543 sd_dp->defer_t = SDEB_DEFER_POLL;
5544 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5546 if (!sd_dp->init_hrt) {
5547 sd_dp->init_hrt = true;
5548 sqcp->sd_dp = sd_dp;
5549 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5550 HRTIMER_MODE_REL_PINNED);
5551 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5552 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5555 sd_dp->defer_t = SDEB_DEFER_HRT;
5556 /* schedule the invocation of scsi_done() for a later time */
5557 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5559 if (sdebug_statistics)
5560 sd_dp->issuing_cpu = raw_smp_processor_id();
5561 } else { /* jdelay < 0, use work queue */
5562 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5563 atomic_read(&sdeb_inject_pending)))
5564 sd_dp->aborted = true;
5566 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5567 spin_lock_irqsave(&sqp->qc_lock, iflags);
5568 if (!sd_dp->init_poll) {
5569 sd_dp->init_poll = true;
5570 sqcp->sd_dp = sd_dp;
5571 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5574 sd_dp->defer_t = SDEB_DEFER_POLL;
5575 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5577 if (!sd_dp->init_wq) {
5578 sd_dp->init_wq = true;
5579 sqcp->sd_dp = sd_dp;
5580 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5582 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5584 sd_dp->defer_t = SDEB_DEFER_WQ;
5585 schedule_work(&sd_dp->ew.work);
5587 if (sdebug_statistics)
5588 sd_dp->issuing_cpu = raw_smp_processor_id();
5589 if (unlikely(sd_dp->aborted)) {
5590 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5591 scsi_cmd_to_rq(cmnd)->tag);
5592 blk_abort_request(scsi_cmd_to_rq(cmnd));
5593 atomic_set(&sdeb_inject_pending, 0);
5594 sd_dp->aborted = false;
5597 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5598 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5599 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5602 respond_in_thread: /* call back to mid-layer using invocation thread */
5603 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5604 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5605 if (cmnd->result == 0 && scsi_result != 0)
5606 cmnd->result = scsi_result;
5607 cmnd->scsi_done(cmnd);
5611 /* Note: The following macros create attribute files in the
5612 /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of changes made via those files and cannot trigger
   auxiliary actions, as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
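
   Illustrative usage (parameter and attribute names as declared below;
   the values are only examples):

     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
     echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts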
5617 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5618 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5619 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5620 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5621 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5622 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5623 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5624 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5625 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5626 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5627 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5628 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5629 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5630 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5631 module_param_string(inq_product, sdebug_inq_product_id,
5632 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5633 module_param_string(inq_rev, sdebug_inq_product_rev,
5634 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5635 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5636 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5637 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5638 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5639 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5640 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5641 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5642 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5643 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5644 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5645 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5647 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5649 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5650 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5651 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5652 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5653 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5654 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5655 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5656 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5657 module_param_named(per_host_store, sdebug_per_host_store, bool,
5659 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5660 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5661 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5662 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5663 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5664 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5665 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5666 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5667 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5668 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5669 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5670 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5671 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5672 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5673 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5674 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5675 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5676 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5678 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5679 module_param_named(write_same_length, sdebug_write_same_length, int,
5681 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5682 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5683 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5684 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5686 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5687 MODULE_DESCRIPTION("SCSI debug adapter driver");
5688 MODULE_LICENSE("GPL");
5689 MODULE_VERSION(SDEBUG_VERSION);
5691 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5692 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5693 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5694 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5695 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5696 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5697 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5698 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5701 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5702 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5703 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5704 MODULE_PARM_DESC(host_max_queue,
5705 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5706 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5707 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5708 SDEBUG_VERSION "\")");
5709 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5710 MODULE_PARM_DESC(lbprz,
5711 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5712 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5713 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5714 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5715 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors for which to return a follow-on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number at which to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5726 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5727 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5728 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5729 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5730 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5733 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5734 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5736 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5737 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5738 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5739 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5740 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5741 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5742 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5743 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5744 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5745 MODULE_PARM_DESC(uuid_ctl,
5746 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5747 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5748 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5749 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5750 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5751 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5752 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5753 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5754 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5756 #define SDEBUG_INFO_LEN 256
5757 static char sdebug_info[SDEBUG_INFO_LEN];
5759 static const char *scsi_debug_info(struct Scsi_Host *shp)
5763 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5764 my_name, SDEBUG_VERSION, sdebug_version_date);
5765 if (k >= (SDEBUG_INFO_LEN - 1))
5767 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5768 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5769 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5770 "statistics", (int)sdebug_statistics);
5774 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5775 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5780 int minLen = length > 15 ? 15 : length;
5782 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5784 memcpy(arr, buffer, minLen);
5786 if (1 != sscanf(arr, "%d", &opts))
5789 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5790 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5791 if (sdebug_every_nth != 0)
5796 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5797 * same for each scsi_debug host (if more than one). Some of the counters
 * printed are not atomic, so they might be inaccurate on a busy system. */
5799 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5802 struct sdebug_queue *sqp;
5803 struct sdebug_host_info *sdhp;
5805 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5806 SDEBUG_VERSION, sdebug_version_date);
5807 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5808 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5809 sdebug_opts, sdebug_every_nth);
5810 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5811 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5812 sdebug_sector_size, "bytes");
5813 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5814 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5816 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5817 num_dev_resets, num_target_resets, num_bus_resets,
5819 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5820 dix_reads, dix_writes, dif_errors);
5821 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5823 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5824 atomic_read(&sdebug_cmnd_count),
5825 atomic_read(&sdebug_completions),
5826 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5827 atomic_read(&sdebug_a_tsf),
5828 atomic_read(&sdeb_mq_poll_count));
5830 seq_printf(m, "submit_queues=%d\n", submit_queues);
5831 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5832 seq_printf(m, " queue %d:\n", j);
5833 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5834 if (f != sdebug_max_queue) {
5835 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5836 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5837 "first,last bits", f, l);
5841 seq_printf(m, "this host_no=%d\n", host->host_no);
5842 if (!xa_empty(per_store_ap)) {
5845 unsigned long l_idx;
5846 struct sdeb_store_info *sip;
5848 seq_puts(m, "\nhost list:\n");
5850 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5852 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5853 sdhp->shost->host_no, idx);
5856 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5857 sdeb_most_recent_idx);
5859 xa_for_each(per_store_ap, l_idx, sip) {
5860 niu = xa_get_mark(per_store_ap, l_idx,
5861 SDEB_XA_NOT_IN_USE);
5863 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5864 (niu ? " not_in_use" : ""));
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
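/*
 * Example (hypothetical value): 'echo 4 > /sys/bus/pseudo/drivers/scsi_debug/delay'
 * requests roughly 4 jiffies (16 ms with HZ=250) of delay per command
 * response; per the comment above it fails with EBUSY while commands are
 * still queued.
 */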
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);
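/*
 * Example: 'echo 0x11 > /sys/bus/pseudo/drivers/scsi_debug/opts' and
 * 'echo 17 > /sys/bus/pseudo/drivers/scsi_debug/opts' set the same option
 * bits, since opts_store() above accepts hex (leading "0x") or decimal.
 */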
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int n;

	/* Cannot change from or to TYPE_ZBC with sysfs */
	if (sdebug_ptype == TYPE_ZBC)
		return -EINVAL;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n == TYPE_ZBC)
			return -EINVAL;
		sdebug_ptype = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ptype);
static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_dsense = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(dsense);
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
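/*
 * Example (assumes the module was loaded with fake_rw=1, so no store exists):
 * 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw' is the 1 --> 0
 * transition that allocates (or re-marks) one store shared by all hosts;
 * writing 1 back triggers the shrink path via sdebug_erase_all_stores(true).
 */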
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);
static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);
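/*
 * Example (hypothetical value): 'echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth'
 * combined with an opts bit such as SDEBUG_OPT_TIMEOUT makes roughly every
 * 100th command misbehave; note above that statistics is forced on, since
 * the command counter it relies on is a statistics counter.
 */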
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
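/*
 * Example: 'echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host' adds two
 * pseudo hosts (re-using any store marked not-in-use when per_host_store is
 * active), while 'echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host'
 * removes the most recently added host.
 */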
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);
static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);
static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};

/* Returns the matching BLK_ZONED_* model, or negated errno */
static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}
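/*
 * Example: zbc=host-managed, zbc=managed and zbc=2 are all accepted by
 * sdeb_zbc_model_str() (one per string table above) and all map to
 * BLK_ZONED_HM.
 */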
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
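/*
 * Example: after the module is loaded, 'ls /sys/bus/pseudo/drivers/scsi_debug'
 * lists one file per attribute above; e.g. 'cat .../opts' reads the current
 * option flags while a write there takes effect immediately, with the side
 * effects noted in the comment above (unlike the raw entries under
 * /sys/module/scsi_debug/parameters/).
 */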
static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;
	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
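/*
 * Typical usage (hypothetical parameter values):
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=2
 * builds the default single host with 2 targets of 2 LUNs each, all backed
 * by one shared 256 MiB ramdisk; 'rmmod scsi_debug' tears everything down
 * via scsi_debug_exit() above.
 */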
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth > SDEBUG_CANQUEUE) {
		qdepth = SDEBUG_CANQUEUE;
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
static int sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return 0;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}

	return 0;
}
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
	ktime_t kt_from_boot = ktime_get_boottime();
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;
	spin_lock_irqsave(&sqp->qc_lock, iflags);

	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
		if (first) {
			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
			first = false;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
		}
		if (unlikely(qc_idx >= sdebug_max_queue))
			break;

		sqcp = &sqp->qc_arr[qc_idx];
		sd_dp = sqcp->sd_dp;
		if (unlikely(!sd_dp))
			continue;
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
			break;
		}
		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
			if (kt_from_boot < sd_dp->cmpl_ts)
				continue;
		} else		/* ignoring non REQ_HIPRI requests */
			continue;
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);
		else
			pr_err("devip=NULL from %s\n", __func__);
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = true;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
			       sqp, queue_num, qc_idx, __func__);
			break;
		}
		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				break;
			}
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		sd_dp->defer_t = SDEB_DEFER_NONE;
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scp->scsi_done(scp); /* callback to mid level */
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		num_entries++;
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}
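/*
 * Example (hypothetical parameter values): loading with
 *     modprobe scsi_debug submit_queues=4 poll_queues=2
 * reserves 2 of the 4 hw queues for polled (REQ_HIPRI) I/O via
 * sdebug_map_queues(); completions on those queues are then reaped by
 * sdebug_blk_mq_poll() above and counted in mq_polls.
 */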
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		return -ENODEV;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	} else {
		/*
		 * Poll queues don't need interrupts, but we need at least one I/O queue
		 * left over for non-polled I/O.
		 * If condition not met, trim poll_queues to 1 (just for simplicity).
		 */
		if (poll_queues >= submit_queues) {
			if (submit_queues < 3)
				pr_warn("%s: trim poll_queues to 1\n", my_name);
			else
				pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
					my_name, submit_queues - 1);
			poll_queues = 1;
		}
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};