// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
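
/*
 * Illustrative arithmetic only (not used by the driver): on a 64-bit
 * build BITS_PER_LONG is 64, so SDEBUG_CANQUEUE is 3 * 64 = 192 queued
 * commands per submit queue; on a 32-bit build it is 3 * 32 = 96. The
 * in_use_bm[] bitmap in struct sdebug_queue (below) is sized to match,
 * one bit per queued command slot:
 *
 *	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
 */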

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC5_FULL		= 0xe,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
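
/*
 * A minimal sketch (not the driver's actual checker, which lives in the
 * command dispatch code) of how len_mask[] can be used: len_mask[0]
 * holds the expected cdb length and each following byte masks the bits
 * that may legally be set in the corresponding cdb byte, so a set bit
 * outside the mask indicates an invalid field:
 *
 *	static bool sdeb_cdb_bits_ok(const u8 *cdb, const u8 *len_mask)
 *	{
 *		int k, len = min_t(int, len_mask[0], 16);
 *
 *		for (k = 1; k < len; ++k)
 *			if (cdb[k] & ~len_mask[k])
 *				return false;	// reserved bit set
 *		return true;
 *	}
 */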

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};

static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
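
/*
 * A sketch of the two-step dispatch that opcode_ind_arr and
 * opcode_info_arr (below) support; the driver's real lookup in its
 * queuecommand path also walks the attached overflow array when the
 * service action or opcode differs from the preferred entry:
 *
 *	u8 opcode = scp->cmnd[0];
 *	int idx = opcode_ind_arr[opcode];     // 0 => SDEB_I_INVALID_OPCODE
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 *
 *	if (oip->pfp)
 *		errsts = oip->pfp(scp, devip);  // e.g. resp_read_dt0()
 */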

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
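
/*
 * Sketch (assuming a command whose IMMED bit is set) of how a response
 * function can use the mask; compare resp_start_stop() which treats
 * START STOP UNIT this way. The low bytes keep the SCSI result while
 * the masked bit tells the completion path to shorten the simulated
 * delay:
 *
 *	if (immed)
 *		return res | SDEG_RES_IMMED_MASK;
 *	return res;
 */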

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
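
/*
 * Example of the layout this produces, using the READ family below:
 * READ(16) (opcode 0x88) sits in opcode_info_arr[SDEB_I_READ] as the
 * preferred cdb, with num_attached = ARRAY_SIZE(read_iarr); READ(10),
 * READ(6) and READ(12) live in read_iarr[] and are found by matching
 * scp->cmnd[0] against the attached entries.
 */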

static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};

/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;

static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;	/* io_uring iopoll interface.*/
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
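
/*
 * Worked example (illustrative numbers): with virtual_gb > 0,
 * sdebug_capacity can exceed sdebug_store_sectors, so LBAs are folded
 * back into the store. If sdebug_store_sectors is 16384 and
 * sdebug_sector_size is 512, then lba 16385 maps to
 * do_div(16385, 16384) == 1, i.e. byte offset 512. Writes beyond the
 * store size therefore "wrap" onto earlier blocks.
 */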

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
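
/*
 * Example (fixed format sense, sdebug_dsense=0) of the bytes this
 * builds for INVALID FIELD IN CDB at byte 2 with no bit position
 * (in_byte=2, in_bit=-1):
 *
 *	sbuff[2]  = 0x05	 sense key: ILLEGAL REQUEST
 *	sbuff[12] = 0x24	 asc: INVALID_FIELD_IN_CDB
 *	sbuff[15] = 0xc0	 SKSV=1, C/D=1 (error is in the cdb)
 *	sbuff[16..17] = 0x0002	 field pointer (big endian)
 */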

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
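
/*
 * Usage note (example): this services the cdb_len module parameter,
 * e.g. loading with "modprobe scsi_debug cdb_len=16" makes the sd
 * driver issue READ(16)/WRITE(16) and 10-byte MODE SENSE/SELECT to the
 * simulated disks. Writing the sysfs cdb_len attribute re-applies the
 * choice to every device via all_config_cdb_len() below.
 */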

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
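
/*
 * Sketch of the intended call pattern (hypothetical offsets): a
 * response can be assembled out of order, e.g.
 *
 *	scsi_set_resid(scp, scsi_bufflen(scp));
 *	p_fill_from_dev_buffer(scp, hdr, 64, 0);     // header at offset 0
 *	p_fill_from_dev_buffer(scp, desc, 32, 128);  // later descriptor
 *
 * The min_t() above keeps resid at the smallest value seen, so it ends
 * up reflecting the highest byte actually written.
 */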

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}

static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
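
/*
 * Worked example (illustrative values): a logical unit with
 * dev_id_num 2003 gets the NAA-3 identifier
 * naa3_comp_b + 2003 = 0x33333330000007d3, i.e. NAA field 3 ("locally
 * assigned") in the top nibble with the number in the low bytes.
 */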

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}

static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}

static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}
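
/*
 * Worked example for the granularity above (illustrative values): with
 * physblk_exp=3 and opt_xferlen_exp=0 the reported optimal transfer
 * length granularity is 1 << 3 = 8 logical blocks; setting
 * opt_xferlen_exp=4 (> physblk_exp) would report 1 << 4 = 16 instead.
 */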

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
1560 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1562 unsigned char pq_pdt;
1564 unsigned char *cmd = scp->cmnd;
1567 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1569 alloc_len = get_unaligned_be16(cmd + 3);
1570 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1572 return DID_REQUEUE << 16;
1573 is_disk = (sdebug_ptype == TYPE_DISK);
1574 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1575 is_disk_zbc = (is_disk || is_zbc);
1576 have_wlun = scsi_is_wlun(scp->device->lun);
1578 pq_pdt = TYPE_WLUN; /* present, wlun */
1579 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1580 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1582 pq_pdt = (sdebug_ptype & 0x1f);
1584 if (0x2 & cmd[1]) { /* CMDDT bit set */
1585 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1587 return check_condition_result;
1588 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1589 int lu_id_num, port_group_id, target_dev_id;
1592 int host_no = devip->sdbg_host->shost->host_no;
1594 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1595 (devip->channel & 0x7f);
1596 if (sdebug_vpd_use_hostno == 0)
1598 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1599 (devip->target * 1000) + devip->lun);
1600 target_dev_id = ((host_no + 1) * 2000) +
1601 (devip->target * 1000) - 3;
1602 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1603 if (0 == cmd[2]) { /* supported vital product data pages */
1604 arr[1] = cmd[2]; /*sanity */
1606 arr[n++] = 0x0; /* this page */
1607 arr[n++] = 0x80; /* unit serial number */
1608 arr[n++] = 0x83; /* device identification */
1609 arr[n++] = 0x84; /* software interface ident. */
1610 arr[n++] = 0x85; /* management network addresses */
1611 arr[n++] = 0x86; /* extended inquiry */
1612 arr[n++] = 0x87; /* mode page policy */
1613 arr[n++] = 0x88; /* SCSI ports */
1614 if (is_disk_zbc) { /* SBC or ZBC */
1615 arr[n++] = 0x89; /* ATA information */
1616 arr[n++] = 0xb0; /* Block limits */
1617 arr[n++] = 0xb1; /* Block characteristics */
1619 arr[n++] = 0xb2; /* LB Provisioning */
1621 arr[n++] = 0xb6; /* ZB dev. char. */
1623 arr[3] = n - 4; /* number of supported VPD pages */
1624 } else if (0x80 == cmd[2]) { /* unit serial number */
1625 arr[1] = cmd[2]; /*sanity */
1627 memcpy(&arr[4], lu_id_str, len);
1628 } else if (0x83 == cmd[2]) { /* device identification */
1629 arr[1] = cmd[2]; /*sanity */
1630 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1631 target_dev_id, lu_id_num,
1634 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1635 arr[1] = cmd[2]; /*sanity */
1636 arr[3] = inquiry_vpd_84(&arr[4]);
1637 } else if (0x85 == cmd[2]) { /* Management network addresses */
1638 arr[1] = cmd[2]; /*sanity */
1639 arr[3] = inquiry_vpd_85(&arr[4]);
1640 } else if (0x86 == cmd[2]) { /* extended inquiry */
1641 arr[1] = cmd[2]; /*sanity */
1642 arr[3] = 0x3c; /* number of following entries */
1643 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1644 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1645 else if (have_dif_prot)
1646 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
1648 arr[4] = 0x0; /* no protection stuff */
1649 arr[5] = 0x7; /* head of q, ordered + simple q's */
1650 } else if (0x87 == cmd[2]) { /* mode page policy */
1651 arr[1] = cmd[2]; /*sanity */
1652 arr[3] = 0x8; /* number of following entries */
1653 arr[4] = 0x2; /* disconnect-reconnect mp */
1654 arr[6] = 0x80; /* mlus, shared */
1655 arr[8] = 0x18; /* protocol specific lu */
1656 arr[10] = 0x82; /* mlus, per initiator port */
1657 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1658 arr[1] = cmd[2]; /*sanity */
1659 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1660 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1661 arr[1] = cmd[2]; /*sanity */
1662 n = inquiry_vpd_89(&arr[4]);
1663 put_unaligned_be16(n, arr + 2);
1664 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1665 arr[1] = cmd[2]; /*sanity */
1666 arr[3] = inquiry_vpd_b0(&arr[4]);
1667 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1668 arr[1] = cmd[2]; /*sanity */
1669 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1670 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1671 arr[1] = cmd[2]; /*sanity */
1672 arr[3] = inquiry_vpd_b2(&arr[4]);
1673 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1674 arr[1] = cmd[2]; /*sanity */
1675 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
1677 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
1679 return check_condition_result;
		}
1681 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1682 ret = fill_from_dev_buffer(scp, arr,
1683 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
1687 /* drops through here for a standard inquiry */
1688 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1689 arr[2] = sdebug_scsi_level;
1690 arr[3] = 2; /* response_data_format==2 */
1691 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1692 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1693 if (sdebug_vpd_use_hostno == 0)
1694 arr[5] |= 0x10; /* claim: implicit TPGS */
1695 arr[6] = 0x10; /* claim: MultiP */
1696 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1697 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1698 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1699 memcpy(&arr[16], sdebug_inq_product_id, 16);
1700 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1701 /* Use Vendor Specific area to place driver date in ASCII */
1702 memcpy(&arr[36], sdebug_version_date, 8);
1703 /* version descriptors (2 bytes each) follow */
1704 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1705 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
	n = 62;
1707 if (is_disk) { /* SBC-4 no version claimed */
1708 put_unaligned_be16(0x600, arr + n);
		n += 2;
1710 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1711 put_unaligned_be16(0x525, arr + n);
		n += 2;
1713 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1714 put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
1717 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1718 ret = fill_from_dev_buffer(scp, arr,
1719 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
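/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * how the lu_id_num rendered into the unit serial number and device
 * identification VPD pages above is derived from host, target and lun.
 */
static int __maybe_unused sdeb_example_lu_id(int host_no, int target, int lun,
					     char *buf, int buflen)
{
	int lu_id_num = ((host_no + 1) * 2000) + (target * 1000) + lun;

	return scnprintf(buf, buflen, "%d", lu_id_num);
}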
1724 /* See resp_iec_m_pg() for how this data is manipulated */
1725 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				    0, 0, 0x0, 0x0};
1728 static int resp_requests(struct scsi_cmnd *scp,
1729 struct sdebug_dev_info *devip)
{
1731 unsigned char *cmd = scp->cmnd;
1732 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1733 bool dsense = !!(cmd[1] & 1);
1734 u32 alloc_len = cmd[4];
	u32 len = 18;
1736 int stopped_state = atomic_read(&devip->stopped);
1738 memset(arr, 0, sizeof(arr));
1739 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1743 arr[2] = LOGICAL_UNIT_NOT_READY;
1744 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1748 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
1749 arr[7] = 0xa; /* 18 byte sense buffer */
1750 arr[12] = LOGICAL_UNIT_NOT_READY;
1751 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1753 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1754 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1757 arr[1] = 0x0; /* NO_SENSE in sense_key */
1758 arr[2] = THRESHOLD_EXCEEDED;
1759 arr[3] = 0xff; /* Failure prediction(false) */
1763 arr[2] = 0x0; /* NO_SENSE in sense_key */
1764 arr[7] = 0xa; /* 18 byte sense buffer */
1765 arr[12] = THRESHOLD_EXCEEDED;
1766 arr[13] = 0xff; /* Failure prediction(false) */
1768 } else { /* nothing to report */
1771 memset(arr, 0, len);
1774 memset(arr, 0, len);
1779 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
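/*
 * Minimal sketch (hypothetical helper) of the fixed-format sense layout
 * that resp_requests() builds above: response code 0x70, sense key in
 * byte 2, additional sense length in byte 7, ASC/ASCQ in bytes 12/13.
 * The descriptor format (0x72) variant needs only 8 bytes here, with
 * the sense key, ASC and ASCQ in bytes 1, 2 and 3.
 */
static void __maybe_unused sdeb_example_fixed_sense(unsigned char *arr,
						    u8 key, u8 asc, u8 ascq)
{
	memset(arr, 0, 18);
	arr[0] = 0x70;		/* fixed format, current errors */
	arr[2] = key;
	arr[7] = 0xa;		/* 18 byte sense buffer */
	arr[12] = asc;
	arr[13] = ascq;
}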
1782 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
1784 unsigned char *cmd = scp->cmnd;
1785 int power_cond, want_stop, stopped_state;
	bool changing;
1788 power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
1790 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1791 return check_condition_result;
	}
1793 want_stop = !(cmd[4] & 1);
1794 stopped_state = atomic_read(&devip->stopped);
1795 if (stopped_state == 2) {
1796 ktime_t now_ts = ktime_get_boottime();
1798 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1799 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1801 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1802 /* tur_ms_to_ready timer extinguished */
1803 atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
	}
1807 if (stopped_state == 2) {
		if (want_stop) {
1809 stopped_state = 1; /* dummy up success */
1810 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1811 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1812 return check_condition_result;
		}
	}
1816 changing = (stopped_state != want_stop);
1818 atomic_xchg(&devip->stopped, want_stop);
1819 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1820 return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
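/*
 * Sketch (hypothetical helper) of the readiness test used above: a unit
 * created in the stopped=2 state only becomes ready once the configured
 * number of milliseconds has elapsed since its creation timestamp.
 */
static bool __maybe_unused sdeb_example_tur_ready(ktime_t create_ts,
						  unsigned int tur_ms_to_ready)
{
	u64 diff_ns = ktime_to_ns(ktime_sub(ktime_get_boottime(), create_ts));

	return diff_ns >= (u64)tur_ms_to_ready * 1000000;
}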
1825 static sector_t get_sdebug_capacity(void)
{
1827 static const unsigned int gibibyte = 1073741824;
1829 if (sdebug_virtual_gb > 0)
1830 return (sector_t)sdebug_virtual_gb *
1831 (gibibyte / sdebug_sector_size);
	else
1833 return sdebug_store_sectors;
}
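/*
 * Worked example: with virtual_gb=4 and sector_size=512 the function
 * above reports 4 * (1073741824 / 512) = 8388608 sectors, even when the
 * backing store is smaller; accesses then wrap modulo
 * sdebug_store_sectors (see do_device_access() below).
 */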
1836 #define SDEBUG_READCAP_ARR_SZ 8
1837 static int resp_readcap(struct scsi_cmnd *scp,
1838 struct sdebug_dev_info *devip)
{
1840 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;
1843 /* following just in case virtual_gb changed */
1844 sdebug_capacity = get_sdebug_capacity();
1845 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1846 if (sdebug_capacity < 0xffffffff) {
1847 capac = (unsigned int)sdebug_capacity - 1;
1848 put_unaligned_be32(capac, arr + 0);
1850 put_unaligned_be32(0xffffffff, arr + 0);
1851 put_unaligned_be16(sdebug_sector_size, arr + 6);
1852 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1855 #define SDEBUG_READCAP16_ARR_SZ 32
1856 static int resp_readcap16(struct scsi_cmnd *scp,
1857 struct sdebug_dev_info *devip)
{
1859 unsigned char *cmd = scp->cmnd;
1860 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	u32 alloc_len;
1863 alloc_len = get_unaligned_be32(cmd + 10);
1864 /* following just in case virtual_gb changed */
1865 sdebug_capacity = get_sdebug_capacity();
1866 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1867 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1868 put_unaligned_be32(sdebug_sector_size, arr + 8);
1869 arr[13] = sdebug_physblk_exp & 0xf;
1870 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1872 if (scsi_debug_lbp()) {
1873 arr[14] |= 0x80; /* LBPME */
1874 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1875 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1876 * in the wider field maps to 0 in this field.
1878 if (sdebug_lbprz & 1) /* precisely what the draft requires */
			arr[14] |= 0x40; /* LBPRZ */
1883 * Since the scsi_debug READ CAPACITY implementation always reports the
1884 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1886 if (devip->zmodel == BLK_ZONED_HM)
		arr[12] |= 1 << 4; /* RC BASIS = 1 */
1889 arr[15] = sdebug_lowest_aligned & 0xff;
1891 if (have_dif_prot) {
1892 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1893 arr[12] |= 1; /* PROT_EN */
1896 return fill_from_dev_buffer(scp, arr,
1897 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
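/*
 * Sketch of the protection byte (byte 12 of the READ CAPACITY(16)
 * response) built above: P_TYPE occupies bits 3:1 and PROT_EN bit 0,
 * so DIF types 1, 2 and 3 encode as 0x1, 0x3 and 0x5.
 */
static u8 __maybe_unused sdeb_example_prot_byte(int dif_type)
{
	return ((dif_type - 1) << 1) | 0x1;	/* P_TYPE | PROT_EN */
}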
1900 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1902 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1903 struct sdebug_dev_info *devip)
{
1905 unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
1907 int host_no = devip->sdbg_host->shost->host_no;
1908 int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;
1912 alen = get_unaligned_be32(cmd + 6);
1913 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
1915 return DID_REQUEUE << 16;
1917 * EVPD page 0x88 states we have two ports, one
1918 * real and a fake port with no device connected.
1919 * So we create two port groups with one port each
1920 * and set the group with port B to unavailable.
1922 port_a = 0x1; /* relative port A */
1923 port_b = 0x2; /* relative port B */
1924 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1925 (devip->channel & 0x7f);
1926 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1927 (devip->channel & 0x7f) + 0x80;
1930 * The asymmetric access state is cycled according to the host_id.
1933 if (sdebug_vpd_use_hostno == 0) {
1934 arr[n++] = host_no % 3; /* Asymm access state */
1935 arr[n++] = 0x0F; /* claim: all states are supported */
1937 arr[n++] = 0x0; /* Active/Optimized path */
1938 arr[n++] = 0x01; /* only support active/optimized paths */
1940 put_unaligned_be16(port_group_a, arr + n);
1942 arr[n++] = 0; /* Reserved */
1943 arr[n++] = 0; /* Status code */
1944 arr[n++] = 0; /* Vendor unique */
1945 arr[n++] = 0x1; /* One port per group */
1946 arr[n++] = 0; /* Reserved */
1947 arr[n++] = 0; /* Reserved */
1948 put_unaligned_be16(port_a, arr + n);
1950 arr[n++] = 3; /* Port unavailable */
1951 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1952 put_unaligned_be16(port_group_b, arr + n);
1954 arr[n++] = 0; /* Reserved */
1955 arr[n++] = 0; /* Status code */
1956 arr[n++] = 0; /* Vendor unique */
1957 arr[n++] = 0x1; /* One port per group */
1958 arr[n++] = 0; /* Reserved */
1959 arr[n++] = 0; /* Reserved */
1960 put_unaligned_be16(port_b, arr + n);
1964 put_unaligned_be32(rlen, arr + 0);
1967 * Return the smallest value of either
1968 * - The allocated length
1969 * - The constructed command length
1970 * - The maximum array size
1972 rlen = min(alen, n);
1973 ret = fill_from_dev_buffer(scp, arr,
1974 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
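/*
 * Sketch (hypothetical helper) of the relative target port group id
 * encoding shared with the device identification VPD page: host number
 * in bits 14:8, channel in bits 6:0, bit 7 marking the fake "port B"
 * group that is always reported unavailable.
 */
static int __maybe_unused sdeb_example_port_group(int host_no, int channel,
						  bool group_b)
{
	return (((host_no + 1) & 0x7f) << 8) + (channel & 0x7f) +
	       (group_b ? 0x80 : 0);
}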
1979 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1980 struct sdebug_dev_info *devip)
{
	bool rctd;
1983 u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
1985 u32 alloc_len, a_len;
1986 int k, offset, len, errsts, count, bump, na;
1987 const struct opcode_info_t *oip;
1988 const struct opcode_info_t *r_oip;
	u8 *arr;
1990 u8 *cmd = scp->cmnd;
1992 rctd = !!(cmd[2] & 0x80);
1993 reporting_opts = cmd[2] & 0x7;
1994 req_opcode = cmd[3];
1995 req_sa = get_unaligned_be16(cmd + 4);
1996 alloc_len = get_unaligned_be32(cmd + 6);
1997 if (alloc_len < 4 || alloc_len > 0xffff) {
1998 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1999 return check_condition_result;
2001 if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
2005 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
2007 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
2009 return check_condition_result;
	}
2011 switch (reporting_opts) {
2012 case 0: /* all commands */
2013 /* count number of commands */
2014 for (count = 0, oip = opcode_info_arr;
2015 oip->num_attached != 0xff; ++oip) {
2016 if (F_INV_OP & oip->flags)
				continue;
2018 count += (oip->num_attached + 1);
2020 bump = rctd ? 20 : 8;
2021 put_unaligned_be32(count * bump, arr);
2022 for (offset = 4, oip = opcode_info_arr;
2023 oip->num_attached != 0xff && offset < a_len; ++oip) {
2024 if (F_INV_OP & oip->flags)
				continue;
2026 na = oip->num_attached;
2027 arr[offset] = oip->opcode;
2028 put_unaligned_be16(oip->sa, arr + offset + 2);
2030 arr[offset + 5] |= 0x2;
2031 if (FF_SA & oip->flags)
2032 arr[offset + 5] |= 0x1;
2033 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2035 put_unaligned_be16(0xa, arr + offset + 8);
2037 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2038 if (F_INV_OP & oip->flags)
2041 arr[offset] = oip->opcode;
2042 put_unaligned_be16(oip->sa, arr + offset + 2);
2044 arr[offset + 5] |= 0x2;
2045 if (FF_SA & oip->flags)
2046 arr[offset + 5] |= 0x1;
2047 put_unaligned_be16(oip->len_mask[0],
2050 put_unaligned_be16(0xa,
2057 case 1: /* one command: opcode only */
2058 case 2: /* one command: opcode plus service action */
2059 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2060 sdeb_i = opcode_ind_arr[req_opcode];
2061 oip = &opcode_info_arr[sdeb_i];
2062 if (F_INV_OP & oip->flags) {
2066 if (1 == reporting_opts) {
2067 if (FF_SA & oip->flags) {
2068 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2071 return check_condition_result;
2074 } else if (2 == reporting_opts &&
2075 0 == (FF_SA & oip->flags)) {
2076 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2077 kfree(arr); /* point at requested sa */
2078 return check_condition_result;
2080 if (0 == (FF_SA & oip->flags) &&
2081 req_opcode == oip->opcode)
2083 else if (0 == (FF_SA & oip->flags)) {
2084 na = oip->num_attached;
2085 for (k = 0, oip = oip->arrp; k < na;
2087 if (req_opcode == oip->opcode)
2090 supp = (k >= na) ? 1 : 3;
2091 } else if (req_sa != oip->sa) {
2092 na = oip->num_attached;
2093 for (k = 0, oip = oip->arrp; k < na;
2095 if (req_sa == oip->sa)
2098 supp = (k >= na) ? 1 : 3;
2102 u = oip->len_mask[0];
2103 put_unaligned_be16(u, arr + 2);
2104 arr[4] = oip->opcode;
2105 for (k = 1; k < u; ++k)
2106 arr[4 + k] = (k < 16) ?
2107 oip->len_mask[k] : 0xff;
2112 arr[1] = (rctd ? 0x80 : 0) | supp;
2114 put_unaligned_be16(0xa, arr + offset);
2119 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2121 return check_condition_result;
2123 offset = (offset < a_len) ? offset : a_len;
2124 len = (offset < alloc_len) ? offset : alloc_len;
2125 errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
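/*
 * Worked example for the "report all" case above: the response is a
 * 4 byte header followed by one descriptor per command, 8 bytes each,
 * or 20 bytes when RCTD is set (the extra 12 bytes hold the command
 * timeouts descriptor), hence the bump of 8 or 20.
 */
static u32 __maybe_unused sdeb_example_rsoc_len(u32 count, bool rctd)
{
	return 4 + count * (rctd ? 20 : 8);
}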
2130 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2131 struct sdebug_dev_info *devip)
{
	bool repd;
	u32 alloc_len, len;
	u8 arr[16];
2136 u8 *cmd = scp->cmnd;
2138 memset(arr, 0, sizeof(arr));
2139 repd = !!(cmd[2] & 0x80);
2140 alloc_len = get_unaligned_be32(cmd + 6);
2141 if (alloc_len < 4) {
2142 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2143 return check_condition_result;
2145 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2146 arr[1] = 0x1; /* ITNRS */
	if (repd) {
		arr[3] = 0xc;
		len = 16;
	} else
		len = 4;

2153 len = (len < alloc_len) ? len : alloc_len;
2154 return fill_from_dev_buffer(scp, arr, len);
}
2157 /* <<Following mode page info copied from ST318451LW>> */
2159 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2160 { /* Read-Write Error Recovery page for mode_sense */
2161 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};
2164 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2166 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2167 return sizeof(err_recov_pg);
2170 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2171 { /* Disconnect-Reconnect page for mode_sense */
2172 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2173 0, 0, 0, 0, 0, 0, 0, 0};
2175 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2177 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2178 return sizeof(disconnect_pg);
2181 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2182 { /* Format device page for mode_sense */
2183 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2184 0, 0, 0, 0, 0, 0, 0, 0,
2185 0, 0, 0, 0, 0x40, 0, 0, 0};
2187 memcpy(p, format_pg, sizeof(format_pg));
2188 put_unaligned_be16(sdebug_sectors_per, p + 10);
2189 put_unaligned_be16(sdebug_sector_size, p + 12);
2190 if (sdebug_removable)
2191 p[20] |= 0x20; /* should agree with INQUIRY */
2193 memset(p + 2, 0, sizeof(format_pg) - 2);
2194 return sizeof(format_pg);
2197 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2198 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2201 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2202 { /* Caching page for mode_sense */
2203 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2204 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2205 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2206 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2208 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2209 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2210 memcpy(p, caching_pg, sizeof(caching_pg));
2212 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2213 else if (2 == pcontrol)
2214 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2215 return sizeof(caching_pg);
}
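/*
 * The pcontrol argument of these resp_*_pg() helpers follows the MODE
 * SENSE PC field: 0 returns current values, 1 a mask of changeable
 * bits, 2 the defaults; 3 (saved values) is rejected by
 * resp_mode_sense() with SAVING_PARAMS_UNSUP before any page helper
 * runs.
 */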
2218 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				     0, 0, 0x2, 0x4b};
2221 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2222 { /* Control mode page for mode_sense */
2223 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0x2, 0x4b};
2225 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};
2229 ctrl_m_pg[2] |= 0x4;
2231 ctrl_m_pg[2] &= ~0x4;
2234 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2236 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2238 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2239 else if (2 == pcontrol)
2240 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2241 return sizeof(ctrl_m_pg);
2245 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2246 { /* Informational Exceptions control mode page for mode_sense */
2247 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
2249 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};
2252 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2254 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2255 else if (2 == pcontrol)
2256 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2257 return sizeof(iec_m_pg);
2260 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2261 { /* SAS SSP mode page - short format for mode_sense */
2262 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2263 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2265 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2267 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2268 return sizeof(sas_sf_m_pg);
2272 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2274 { /* SAS phy control and discover mode page for mode_sense */
2275 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2276 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2277 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2278 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2279 0x2, 0, 0, 0, 0, 0, 0, 0,
2280 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2281 0, 0, 0, 0, 0, 0, 0, 0,
2282 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2283 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2284 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2285 0x3, 0, 0, 0, 0, 0, 0, 0,
2286 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2287 0, 0, 0, 0, 0, 0, 0, 0,
		};
2291 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2292 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2293 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2294 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2295 port_a = target_dev_id + 1;
2296 port_b = port_a + 1;
2297 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2298 put_unaligned_be32(port_a, p + 20);
2299 put_unaligned_be32(port_b, p + 48 + 20);
2301 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2302 return sizeof(sas_pcd_m_pg);
2305 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2306 { /* SAS SSP shared protocol specific port mode subpage */
2307 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2308 0, 0, 0, 0, 0, 0, 0, 0,
		};
2311 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2313 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2314 return sizeof(sas_sha_m_pg);
2317 #define SDEBUG_MAX_MSENSE_SZ 256
2319 static int resp_mode_sense(struct scsi_cmnd *scp,
2320 struct sdebug_dev_info *devip)
2322 int pcontrol, pcode, subpcode, bd_len;
2323 unsigned char dev_spec;
2324 u32 alloc_len, offset, len;
2326 int target = scp->device->id;
2328 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2329 unsigned char *cmd = scp->cmnd;
2330 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2332 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2333 pcontrol = (cmd[2] & 0xc0) >> 6;
2334 pcode = cmd[2] & 0x3f;
2336 msense_6 = (MODE_SENSE == cmd[0]);
2337 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2338 is_disk = (sdebug_ptype == TYPE_DISK);
2339 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2340 if ((is_disk || is_zbc) && !dbd)
2341 bd_len = llbaa ? 16 : 8;
2344 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2345 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2346 if (0x3 == pcontrol) { /* Saving values not supported */
2347 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2348 return check_condition_result;
2350 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2351 (devip->target * 1000) - 3;
2352 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2353 if (is_disk || is_zbc) {
2354 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2366 arr[4] = 0x1; /* set LONGLBA bit */
2367 arr[7] = bd_len; /* assume 255 or less */
2371 if ((bd_len > 0) && (!sdebug_capacity))
2372 sdebug_capacity = get_sdebug_capacity();
2375 if (sdebug_capacity > 0xfffffffe)
2376 put_unaligned_be32(0xffffffff, ap + 0);
2378 put_unaligned_be32(sdebug_capacity, ap + 0);
2379 put_unaligned_be16(sdebug_sector_size, ap + 6);
2382 } else if (16 == bd_len) {
2383 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2384 put_unaligned_be32(sdebug_sector_size, ap + 12);
2389 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2390 /* TODO: Control Extension page */
2391 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2392 return check_condition_result;
2397 case 0x1: /* Read-Write error recovery page, direct access */
2398 len = resp_err_recov_pg(ap, pcontrol, target);
2401 case 0x2: /* Disconnect-Reconnect page, all devices */
2402 len = resp_disconnect_pg(ap, pcontrol, target);
2405 case 0x3: /* Format device page, direct access */
2407 len = resp_format_pg(ap, pcontrol, target);
2412 case 0x8: /* Caching page, direct access */
2413 if (is_disk || is_zbc) {
2414 len = resp_caching_pg(ap, pcontrol, target);
2419 case 0xa: /* Control Mode page, all devices */
2420 len = resp_ctrl_m_pg(ap, pcontrol, target);
2423 case 0x19: /* if spc==1 then sas phy, control+discover */
2424 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2425 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2426 return check_condition_result;
2429 if ((0x0 == subpcode) || (0xff == subpcode))
2430 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2431 if ((0x1 == subpcode) || (0xff == subpcode))
2432 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2434 if ((0x2 == subpcode) || (0xff == subpcode))
2435 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2438 case 0x1c: /* Informational Exceptions Mode page, all devices */
2439 len = resp_iec_m_pg(ap, pcontrol, target);
2442 case 0x3f: /* Read all Mode pages */
2443 if ((0 == subpcode) || (0xff == subpcode)) {
2444 len = resp_err_recov_pg(ap, pcontrol, target);
2445 len += resp_disconnect_pg(ap + len, pcontrol, target);
2447 len += resp_format_pg(ap + len, pcontrol,
2449 len += resp_caching_pg(ap + len, pcontrol,
2451 } else if (is_zbc) {
2452 len += resp_caching_pg(ap + len, pcontrol,
2455 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2456 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2457 if (0xff == subpcode) {
2458 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2459 target, target_dev_id);
2460 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2462 len += resp_iec_m_pg(ap + len, pcontrol, target);
2465 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2466 return check_condition_result;
2474 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2475 return check_condition_result;
	if (msense_6)
2478 arr[0] = offset - 1;
	else
2480 put_unaligned_be16((offset - 2), arr + 0);
2481 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
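/*
 * Sketch of the header fix-up performed just above: MODE SENSE(6)
 * stores the mode data length in one byte that excludes itself, while
 * MODE SENSE(10) uses a big-endian 16 bit field excluding its own two
 * bytes.
 */
static void __maybe_unused sdeb_example_msense_hdr(unsigned char *arr,
						   bool msense_6, u32 offset)
{
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16(offset - 2, arr);
}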
2484 #define SDEBUG_MAX_MSELECT_SZ 512
2486 static int resp_mode_select(struct scsi_cmnd *scp,
2487 struct sdebug_dev_info *devip)
2489 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2490 int param_len, res, mpage;
2491 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2492 unsigned char *cmd = scp->cmnd;
2493 int mselect6 = (MODE_SELECT == cmd[0]);
2495 memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
2498 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2499 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2500 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2501 return check_condition_result;
2503 res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
2505 return DID_ERROR << 16;
2506 else if (sdebug_verbose && (res < param_len))
2507 sdev_printk(KERN_INFO, scp->device,
2508 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2509 __func__, param_len, res);
2510 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2511 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2512 off = bd_len + (mselect6 ? 4 : 8);
2513 if (md_len > 2 || off >= res) {
2514 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2515 return check_condition_result;
2517 mpage = arr[off] & 0x3f;
2518 ps = !!(arr[off] & 0x80);
	if (ps) {
2520 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2521 return check_condition_result;
	}
2523 spf = !!(arr[off] & 0x40);
2524 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
			(arr[off + 1] + 2);
2526 if ((pg_len + off) > param_len) {
2527 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2528 PARAMETER_LIST_LENGTH_ERR, 0);
2529 return check_condition_result;
	}
	switch (mpage) {
2532 case 0x8: /* Caching Mode page */
2533 if (caching_pg[1] == arr[off + 1]) {
2534 memcpy(caching_pg + 2, arr + off + 2,
2535 sizeof(caching_pg) - 2);
2536 goto set_mode_changed_ua;
		}
		break;
2539 case 0xa: /* Control Mode page */
2540 if (ctrl_m_pg[1] == arr[off + 1]) {
2541 memcpy(ctrl_m_pg + 2, arr + off + 2,
2542 sizeof(ctrl_m_pg) - 2);
2543 if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
2547 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2548 goto set_mode_changed_ua;
		}
		break;
2551 case 0x1c: /* Informational Exceptions Mode page */
2552 if (iec_m_pg[1] == arr[off + 1]) {
2553 memcpy(iec_m_pg + 2, arr + off + 2,
2554 sizeof(iec_m_pg) - 2);
2555 goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
2561 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2562 return check_condition_result;
2563 set_mode_changed_ua:
2564 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
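/*
 * Sketch (hypothetical helper) of the page length computation used when
 * walking the MODE SELECT payload above: a sub-page format page (SPF
 * bit set) carries a 16 bit length at bytes 2..3 plus a 4 byte header,
 * a short page one length byte plus a 2 byte header.
 */
static int __maybe_unused sdeb_example_mpage_len(const unsigned char *p)
{
	bool spf = !!(p[0] & 0x40);

	return spf ? (get_unaligned_be16(p + 2) + 4) : (p[1] + 2);
}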
2568 static int resp_temp_l_pg(unsigned char *arr)
2570 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2571 0x0, 0x1, 0x3, 0x2, 0x0, 65,
				     };
2574 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2575 return sizeof(temp_l_pg);
2578 static int resp_ie_l_pg(unsigned char *arr)
2580 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2583 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2584 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2585 arr[4] = THRESHOLD_EXCEEDED;
2588 return sizeof(ie_l_pg);
2591 #define SDEBUG_MAX_LSENSE_SZ 512
2593 static int resp_log_sense(struct scsi_cmnd *scp,
2594 struct sdebug_dev_info *devip)
2596 int ppc, sp, pcode, subpcode;
2597 u32 alloc_len, len, n;
2598 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2599 unsigned char *cmd = scp->cmnd;
2601 memset(arr, 0, sizeof(arr));
2605 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2606 return check_condition_result;
2608 pcode = cmd[2] & 0x3f;
2609 subpcode = cmd[3] & 0xff;
2610 alloc_len = get_unaligned_be16(cmd + 7);
2612 if (0 == subpcode) {
2614 case 0x0: /* Supported log pages log page */
			n = 4;
2616 arr[n++] = 0x0; /* this page */
2617 arr[n++] = 0xd; /* Temperature */
2618 arr[n++] = 0x2f; /* Informational exceptions */
			arr[3] = n - 4;
			break;
2621 case 0xd: /* Temperature log page */
2622 arr[3] = resp_temp_l_pg(arr + 4);
			break;
2624 case 0x2f: /* Informational exceptions log page */
2625 arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
2628 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2629 return check_condition_result;
		}
2631 } else if (0xff == subpcode) {
2635 case 0x0: /* Supported log pages and subpages log page */
2638 arr[n++] = 0x0; /* 0,0 page */
2640 arr[n++] = 0xff; /* this page */
2642 arr[n++] = 0x0; /* Temperature */
2644 arr[n++] = 0x0; /* Informational exceptions */
2647 case 0xd: /* Temperature subpages */
2650 arr[n++] = 0x0; /* Temperature */
2653 case 0x2f: /* Informational exceptions subpages */
2656 arr[n++] = 0x0; /* Informational exceptions */
		default:
2660 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2661 return check_condition_result;
		}
	} else {
2664 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2665 return check_condition_result;
	}
2667 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2668 return fill_from_dev_buffer(scp, arr,
2669 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
2672 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
2674 return devip->nr_zones != 0;
}
2677 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2678 unsigned long long lba)
{
2680 return &devip->zstate[lba >> devip->zsize_shift];
}
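/*
 * Worked example: zsize_shift is log2 of the zone size in logical
 * blocks, so with 262144-block zones (128 MiB at 512 bytes) the shift
 * is 18 and LBA 1000000 maps to zone 1000000 >> 18 = 3.
 */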
2683 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
2685 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
}
2688 static void zbc_close_zone(struct sdebug_dev_info *devip,
2689 struct sdeb_zone_state *zsp)
2691 enum sdebug_z_cond zc;
2693 if (zbc_zone_is_conv(zsp))
2697 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2700 if (zc == ZC2_IMPLICIT_OPEN)
2701 devip->nr_imp_open--;
	else
2703 devip->nr_exp_open--;
2705 if (zsp->z_wp == zsp->z_start) {
2706 zsp->z_cond = ZC1_EMPTY;
	} else {
2708 zsp->z_cond = ZC4_CLOSED;
	}
}
2713 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2715 struct sdeb_zone_state *zsp = &devip->zstate[0];
2718 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2719 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2720 zbc_close_zone(devip, zsp);
			return;
		}
	}
}
2726 static void zbc_open_zone(struct sdebug_dev_info *devip,
2727 struct sdeb_zone_state *zsp, bool explicit)
2729 enum sdebug_z_cond zc;
2731 if (zbc_zone_is_conv(zsp))
2735 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2736 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2739 /* Close an implicit open zone if necessary */
2740 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2741 zbc_close_zone(devip, zsp);
2742 else if (devip->max_open &&
2743 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2744 zbc_close_imp_open_zone(devip);
2746 if (zsp->z_cond == ZC4_CLOSED)
2749 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2750 devip->nr_exp_open++;
2752 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2753 devip->nr_imp_open++;
	}
}
2757 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2758 struct sdeb_zone_state *zsp)
2760 switch (zsp->z_cond) {
2761 case ZC2_IMPLICIT_OPEN:
2762 devip->nr_imp_open--;
		break;
2764 case ZC3_EXPLICIT_OPEN:
2765 devip->nr_exp_open--;
		break;
	default:
2768 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2769 zsp->z_start, zsp->z_cond);
		break;
	}

2772 zsp->z_cond = ZC5_FULL;
}
2775 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2776 unsigned long long lba, unsigned int num)
2778 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2779 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2781 if (zbc_zone_is_conv(zsp))
2784 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
2786 if (zsp->z_wp >= zend)
2787 zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
2792 if (lba != zsp->z_wp)
2793 zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			n = zend - lba;
			zsp->z_wp = zend;
2799 } else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			n = num;
		}
2805 if (zsp->z_wp >= zend)
2806 zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;
2812 zend = zsp->z_start + zsp->z_size;
		}
	}
}
2817 static int check_zbc_access_params(struct scsi_cmnd *scp,
2818 unsigned long long lba, unsigned int num, bool write)
2820 struct scsi_device *sdp = scp->device;
2821 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2822 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2823 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2826 if (devip->zmodel == BLK_ZONED_HA)
2828 /* For host-managed, reads cannot cross zone types boundaries */
2829 if (zsp_end != zsp &&
2830 zbc_zone_is_conv(zsp) &&
2831 !zbc_zone_is_conv(zsp_end)) {
2832 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2835 return check_condition_result;
2840 /* No restrictions for writes within conventional zones */
2841 if (zbc_zone_is_conv(zsp)) {
2842 if (!zbc_zone_is_conv(zsp_end)) {
2843 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2845 WRITE_BOUNDARY_ASCQ);
2846 return check_condition_result;
2851 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2852 /* Writes cannot cross sequential zone boundaries */
2853 if (zsp_end != zsp) {
2854 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2856 WRITE_BOUNDARY_ASCQ);
2857 return check_condition_result;
2859 /* Cannot write full zones */
2860 if (zsp->z_cond == ZC5_FULL) {
2861 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2862 INVALID_FIELD_IN_CDB, 0);
2863 return check_condition_result;
2865 /* Writes must be aligned to the zone WP */
2866 if (lba != zsp->z_wp) {
2867 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2869 UNALIGNED_WRITE_ASCQ);
2870 return check_condition_result;
2874 /* Handle implicit open of closed and empty zones */
2875 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2876 if (devip->max_open &&
2877 devip->nr_exp_open >= devip->max_open) {
2878 mk_sense_buffer(scp, DATA_PROTECT,
2881 return check_condition_result;
2883 zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2889 static inline int check_device_access_params
2890 (struct scsi_cmnd *scp, unsigned long long lba,
2891 unsigned int num, bool write)
2893 struct scsi_device *sdp = scp->device;
2894 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2896 if (lba + num > sdebug_capacity) {
2897 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2898 return check_condition_result;
2900 /* transfer length excessive (tie in to block limits VPD page) */
2901 if (num > sdebug_store_sectors) {
2902 /* needs work to find which cdb byte 'num' comes from */
2903 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2904 return check_condition_result;
2906 if (write && unlikely(sdebug_wp)) {
2907 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2908 return check_condition_result;
2910 if (sdebug_dev_is_zoned(devip))
2911 return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
2917 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2918 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2919 * that access any of the "stores" in struct sdeb_store_info should call this
2920 * function with bug_if_fake_rw set to true.
2922 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2923 bool bug_if_fake_rw)
2925 if (sdebug_fake_rw) {
2926 BUG_ON(bug_if_fake_rw); /* See note above */
		return NULL;
	}
2929 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}
2932 /* Returns number of bytes copied or -1 if error. */
2933 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2934 u32 sg_skip, u64 lba, u32 num, bool do_write)
2937 u64 block, rest = 0;
2938 enum dma_data_direction dir;
2939 struct scsi_data_buffer *sdb = &scp->sdb;
2943 dir = DMA_TO_DEVICE;
2944 write_since_sync = true;
2946 dir = DMA_FROM_DEVICE;
2949 if (!sdb->length || !sip)
2951 if (scp->sc_data_direction != dir)
2955 block = do_div(lba, sdebug_store_sectors);
2956 if (block + num > sdebug_store_sectors)
2957 rest = block + num - sdebug_store_sectors;
2959 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2960 fsp + (block * sdebug_sector_size),
2961 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2962 if (ret != (num - rest) * sdebug_sector_size)
2966 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2967 fsp, rest * sdebug_sector_size,
2968 sg_skip + ((num - rest) * sdebug_sector_size),
2975 /* Returns number of bytes copied or -1 if error. */
2976 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2978 struct scsi_data_buffer *sdb = &scp->sdb;
2982 if (scp->sc_data_direction != DMA_TO_DEVICE)
2984 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2985 num * sdebug_sector_size, 0, true);
2988 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2989 * arr into sip->storep+lba and return true. If comparison fails then
2991 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2992 const u8 *arr, bool compare_only)
2995 u64 block, rest = 0;
2996 u32 store_blks = sdebug_store_sectors;
2997 u32 lb_size = sdebug_sector_size;
2998 u8 *fsp = sip->storep;
3000 block = do_div(lba, store_blks);
3001 if (block + num > store_blks)
3002 rest = block + num - store_blks;
3004 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3008 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3014 arr += num * lb_size;
3015 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
3017 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return res;
}
3021 static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)	/* guard=1 selects IP checksum over CRC16 */
3026 csum = (__force __be16)ip_compute_csum(buf, len);
	else
3028 csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}
3033 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3034 sector_t sector, u32 ei_lba)
3036 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3038 if (sdt->guard_tag != csum) {
3039 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3040 (unsigned long)sector,
3041 be16_to_cpu(sdt->guard_tag),
		       be16_to_cpu(csum));
		return 1;
	}
3045 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3046 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3047 pr_err("REF check failed on sector %lu\n",
3048 (unsigned long)sector);
		return 2;
	}
3051 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3052 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3053 pr_err("REF check failed on sector %lu\n",
3054 (unsigned long)sector);
		return 3;
	}
	return 0;
}
3060 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3061 unsigned int sectors, bool read)
3065 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3066 scp->device->hostdata, true);
3067 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3068 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3069 struct sg_mapping_iter miter;
3071 /* Bytes of protection data to copy into sgl */
3072 resid = sectors * sizeof(*dif_storep);
3074 sg_miter_start(&miter, scsi_prot_sglist(scp),
3075 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3076 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3078 while (sg_miter_next(&miter) && resid > 0) {
3079 size_t len = min_t(size_t, miter.length, resid);
3080 void *start = dif_store(sip, sector);
3083 if (dif_store_end < start + len)
3084 rest = start + len - dif_store_end;
3089 memcpy(paddr, start, len - rest);
3091 memcpy(start, paddr, len - rest);
3095 memcpy(paddr + len - rest, dif_storep, rest);
3097 memcpy(dif_storep, paddr + len - rest, rest);
3100 sector += len / sizeof(*dif_storep);
3103 sg_miter_stop(&miter);
3106 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3107 unsigned int sectors, u32 ei_lba)
3112 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3113 scp->device->hostdata, true);
3114 struct t10_pi_tuple *sdt;
3116 for (i = 0; i < sectors; i++, ei_lba++) {
3117 sector = start_sec + i;
3118 sdt = dif_store(sip, sector);
3120 if (sdt->app_tag == cpu_to_be16(0xffff))
3124 * Because scsi_debug acts as both initiator and
3125 * target we proceed to verify the PI even if
3126 * RDPROTECT=3. This is done so the "initiator" knows
3127 * which type of error to return. Otherwise we would
3128 * have to iterate over the PI twice.
3130 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3131 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3140 dif_copy_prot(scp, start_sec, sectors, true);

	return 0;
}
3146 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3153 struct sdeb_store_info *sip = devip2sip(devip, true);
3154 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3155 u8 *cmd = scp->cmnd;
3160 lba = get_unaligned_be64(cmd + 2);
3161 num = get_unaligned_be32(cmd + 10);
3166 lba = get_unaligned_be32(cmd + 2);
3167 num = get_unaligned_be16(cmd + 7);
3172 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3173 (u32)(cmd[1] & 0x1f) << 16;
3174 num = (0 == cmd[4]) ? 256 : cmd[4];
3179 lba = get_unaligned_be32(cmd + 2);
3180 num = get_unaligned_be32(cmd + 6);
3183 case XDWRITEREAD_10:
3185 lba = get_unaligned_be32(cmd + 2);
3186 num = get_unaligned_be16(cmd + 7);
3189 default: /* assume READ(32) */
3190 lba = get_unaligned_be64(cmd + 12);
3191 ei_lba = get_unaligned_be32(cmd + 20);
3192 num = get_unaligned_be32(cmd + 28);
3196 if (unlikely(have_dif_prot && check_prot)) {
3197 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3199 mk_sense_invalid_opcode(scp);
3200 return check_condition_result;
3202 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3203 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3204 (cmd[1] & 0xe0) == 0)
3205 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3208 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3209 atomic_read(&sdeb_inject_pending))) {
3211 atomic_set(&sdeb_inject_pending, 0);
3214 ret = check_device_access_params(scp, lba, num, false);
3217 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3218 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3219 ((lba + num) > sdebug_medium_error_start))) {
3220 /* claim unrecoverable read error */
3221 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3222 /* set info field and valid bit for fixed descriptor */
3223 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3224 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3225 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3226 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3227 put_unaligned_be32(ret, scp->sense_buffer + 3);
3229 scsi_set_resid(scp, scsi_bufflen(scp));
3230 return check_condition_result;
3233 read_lock(macc_lckp);
3236 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3237 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3238 case 1: /* Guard tag error */
3239 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3240 read_unlock(macc_lckp);
3241 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3242 return check_condition_result;
3243 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3244 read_unlock(macc_lckp);
3245 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3246 return illegal_condition_result;
3249 case 3: /* Reference tag error */
3250 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3251 read_unlock(macc_lckp);
3252 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3253 return check_condition_result;
3254 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3255 read_unlock(macc_lckp);
3256 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3257 return illegal_condition_result;
3263 ret = do_device_access(sip, scp, 0, lba, num, false);
3264 read_unlock(macc_lckp);
3265 if (unlikely(ret == -1))
3266 return DID_ERROR << 16;
3268 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3270 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3271 atomic_read(&sdeb_inject_pending))) {
3272 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3273 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3274 atomic_set(&sdeb_inject_pending, 0);
3275 return check_condition_result;
3276 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3277 /* Logical block guard check failed */
3278 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3279 atomic_set(&sdeb_inject_pending, 0);
3280 return illegal_condition_result;
3281 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3282 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3283 atomic_set(&sdeb_inject_pending, 0);
3284 return illegal_condition_result;
		}
	}

	return 0;
}
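/*
 * Sketch of the READ(6) addressing decoded above: the 21 bit LBA spans
 * the low 5 bits of byte 1 plus bytes 2 and 3, and a transfer length
 * byte of 0 means 256 blocks.
 */
static u32 __maybe_unused sdeb_example_read6_lba(const u8 *cmd)
{
	return (u32)cmd[3] | ((u32)cmd[2] << 8) | ((u32)(cmd[1] & 0x1f) << 16);
}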
3290 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3291 unsigned int sectors, u32 ei_lba)
3294 struct t10_pi_tuple *sdt;
3296 sector_t sector = start_sec;
3299 struct sg_mapping_iter diter;
3300 struct sg_mapping_iter piter;
3302 BUG_ON(scsi_sg_count(SCpnt) == 0);
3303 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3305 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3306 scsi_prot_sg_count(SCpnt),
3307 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3308 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3309 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3311 /* For each protection page */
3312 while (sg_miter_next(&piter)) {
3314 if (WARN_ON(!sg_miter_next(&diter))) {
3319 for (ppage_offset = 0; ppage_offset < piter.length;
3320 ppage_offset += sizeof(struct t10_pi_tuple)) {
3321 /* If we're at the end of the current
3322 * data page advance to the next one
3324 if (dpage_offset >= diter.length) {
3325 if (WARN_ON(!sg_miter_next(&diter))) {
3332 sdt = piter.addr + ppage_offset;
3333 daddr = diter.addr + dpage_offset;
3335 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3336 ret = dif_verify(sdt, daddr, sector, ei_lba);
3343 dpage_offset += sdebug_sector_size;
3345 diter.consumed = dpage_offset;
3346 sg_miter_stop(&diter);
3348 sg_miter_stop(&piter);
3350 dif_copy_prot(SCpnt, start_sec, sectors, false);

	return 0;

out:
3357 sg_miter_stop(&diter);
3358 sg_miter_stop(&piter);
	return ret;
}
3362 static unsigned long lba_to_map_index(sector_t lba)
3364 if (sdebug_unmap_alignment)
3365 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3366 sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
3370 static sector_t map_index_to_lba(unsigned long index)
3372 sector_t lba = index * sdebug_unmap_granularity;
3374 if (sdebug_unmap_alignment)
3375 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
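/*
 * Worked example for the two helpers above: with unmap_granularity=4
 * and unmap_alignment=1, lba_to_map_index(5) biases the LBA to 5 + 3 =
 * 8 and divides to index 2, while map_index_to_lba(2) yields 2 * 4 - 3
 * = 5 again, so the helpers invert each other on grain boundaries.
 */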
3379 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3383 unsigned int mapped;
3384 unsigned long index;
3387 index = lba_to_map_index(lba);
3388 mapped = test_bit(index, sip->map_storep);
	if (mapped)
3391 next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
3393 next = find_next_bit(sip->map_storep, map_size, index);
3395 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
3400 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3403 sector_t end = lba + len;
	while (lba < end) {
3406 unsigned long index = lba_to_map_index(lba);
3408 if (index < map_size)
3409 set_bit(index, sip->map_storep);
3411 lba = map_index_to_lba(index + 1);
	}
}
3415 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3418 sector_t end = lba + len;
3419 u8 *fsp = sip->storep;
	while (lba < end) {
3422 unsigned long index = lba_to_map_index(lba);
3424 if (lba == map_index_to_lba(index) &&
3425 lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
3427 clear_bit(index, sip->map_storep);
3428 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3429 memset(fsp + lba * sdebug_sector_size,
3430 (sdebug_lbprz & 1) ? 0 : 0xff,
3431 sdebug_sector_size *
3432 sdebug_unmap_granularity);
3434 if (sip->dif_storep) {
3435 memset(sip->dif_storep + lba, 0xff,
3436 sizeof(*sip->dif_storep) *
3437 sdebug_unmap_granularity);
			}
		}
3440 lba = map_index_to_lba(index + 1);
	}
}
3444 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3451 struct sdeb_store_info *sip = devip2sip(devip, true);
3452 rwlock_t *macc_lckp = &sip->macc_lck;
3453 u8 *cmd = scp->cmnd;
3458 lba = get_unaligned_be64(cmd + 2);
3459 num = get_unaligned_be32(cmd + 10);
3464 lba = get_unaligned_be32(cmd + 2);
3465 num = get_unaligned_be16(cmd + 7);
3470 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3471 (u32)(cmd[1] & 0x1f) << 16;
3472 num = (0 == cmd[4]) ? 256 : cmd[4];
3477 lba = get_unaligned_be32(cmd + 2);
3478 num = get_unaligned_be32(cmd + 6);
3481 case 0x53: /* XDWRITEREAD(10) */
3483 lba = get_unaligned_be32(cmd + 2);
3484 num = get_unaligned_be16(cmd + 7);
3487 default: /* assume WRITE(32) */
3488 lba = get_unaligned_be64(cmd + 12);
3489 ei_lba = get_unaligned_be32(cmd + 20);
3490 num = get_unaligned_be32(cmd + 28);
3494 if (unlikely(have_dif_prot && check_prot)) {
3495 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3497 mk_sense_invalid_opcode(scp);
3498 return check_condition_result;
3500 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3501 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3502 (cmd[1] & 0xe0) == 0)
3503 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3507 write_lock(macc_lckp);
3508 ret = check_device_access_params(scp, lba, num, true);
3510 write_unlock(macc_lckp);
3515 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3516 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3517 case 1: /* Guard tag error */
3518 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3519 write_unlock(macc_lckp);
3520 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3521 return illegal_condition_result;
3522 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3523 write_unlock(macc_lckp);
3524 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3525 return check_condition_result;
3528 case 3: /* Reference tag error */
3529 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3530 write_unlock(macc_lckp);
3531 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3532 return illegal_condition_result;
3533 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3534 write_unlock(macc_lckp);
3535 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3536 return check_condition_result;
3542 ret = do_device_access(sip, scp, 0, lba, num, true);
3543 if (unlikely(scsi_debug_lbp()))
3544 map_region(sip, lba, num);
3545 /* If ZBC zone then bump its write pointer */
3546 if (sdebug_dev_is_zoned(devip))
3547 zbc_inc_wp(devip, lba, num);
3548 write_unlock(macc_lckp);
3549 if (unlikely(-1 == ret))
3550 return DID_ERROR << 16;
3551 else if (unlikely(sdebug_verbose &&
3552 (ret < (num * sdebug_sector_size))))
3553 sdev_printk(KERN_INFO, scp->device,
3554 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3555 my_name, num * sdebug_sector_size, ret);
3557 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3558 atomic_read(&sdeb_inject_pending))) {
3559 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3560 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3561 atomic_set(&sdeb_inject_pending, 0);
3562 return check_condition_result;
3563 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3564 /* Logical block guard check failed */
3565 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3566 atomic_set(&sdeb_inject_pending, 0);
3567 return illegal_condition_result;
3568 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3569 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3570 atomic_set(&sdeb_inject_pending, 0);
3571 return illegal_condition_result;
3578 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3579 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3581 static int resp_write_scat(struct scsi_cmnd *scp,
3582 struct sdebug_dev_info *devip)
3584 u8 *cmd = scp->cmnd;
3587 struct sdeb_store_info *sip = devip2sip(devip, true);
3588 rwlock_t *macc_lckp = &sip->macc_lck;
3590 u16 lbdof, num_lrd, k;
3591 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3592 u32 lb_size = sdebug_sector_size;
3597 static const u32 lrd_size = 32; /* + parameter list header size */
3599 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3601 wrprotect = (cmd[10] >> 5) & 0x7;
3602 lbdof = get_unaligned_be16(cmd + 12);
3603 num_lrd = get_unaligned_be16(cmd + 16);
3604 bt_len = get_unaligned_be32(cmd + 28);
3605 } else { /* that leaves WRITE SCATTERED(16) */
3607 wrprotect = (cmd[2] >> 5) & 0x7;
3608 lbdof = get_unaligned_be16(cmd + 4);
3609 num_lrd = get_unaligned_be16(cmd + 8);
3610 bt_len = get_unaligned_be32(cmd + 10);
3611 if (unlikely(have_dif_prot)) {
3612 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3614 mk_sense_invalid_opcode(scp);
3615 return illegal_condition_result;
3617 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3618 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3620 sdev_printk(KERN_ERR, scp->device,
3621 "Unprotected WR to DIF device\n");
3624 if ((num_lrd == 0) || (bt_len == 0))
3625 return 0; /* T10 says these do-nothings are not errors */
3628 sdev_printk(KERN_INFO, scp->device,
3629 "%s: %s: LB Data Offset field bad\n",
3631 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3632 return illegal_condition_result;
3634 lbdof_blen = lbdof * lb_size;
3635 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3637 sdev_printk(KERN_INFO, scp->device,
3638 "%s: %s: LBA range descriptors don't fit\n",
3640 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3641 return illegal_condition_result;
3643 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3645 return SCSI_MLQUEUE_HOST_BUSY;
3647 sdev_printk(KERN_INFO, scp->device,
3648 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3649 my_name, __func__, lbdof_blen);
3650 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3652 ret = DID_ERROR << 16;
3656 write_lock(macc_lckp);
3657 sg_off = lbdof_blen;
3658 /* Spec says Buffer xfer Length field in number of LBs in dout */
3660 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3661 lba = get_unaligned_be64(up + 0);
3662 num = get_unaligned_be32(up + 8);
3664 sdev_printk(KERN_INFO, scp->device,
3665 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3666 my_name, __func__, k, lba, num, sg_off);
3669 ret = check_device_access_params(scp, lba, num, true);
3671 goto err_out_unlock;
3672 num_by = num * lb_size;
3673 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3675 if ((cum_lb + num) > bt_len) {
3677 sdev_printk(KERN_INFO, scp->device,
3678 "%s: %s: sum of blocks > data provided\n",
3680 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3682 ret = illegal_condition_result;
3683 goto err_out_unlock;
3687 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3688 int prot_ret = prot_verify_write(scp, lba, num,
3692 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3694 ret = illegal_condition_result;
3695 goto err_out_unlock;
3699 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3700 /* If ZBC zone then bump its write pointer */
3701 if (sdebug_dev_is_zoned(devip))
3702 zbc_inc_wp(devip, lba, num);
3703 if (unlikely(scsi_debug_lbp()))
3704 map_region(sip, lba, num);
3705 if (unlikely(-1 == ret)) {
3706 ret = DID_ERROR << 16;
3707 goto err_out_unlock;
3708 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3709 sdev_printk(KERN_INFO, scp->device,
3710 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3711 my_name, num_by, ret);
3713 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3714 atomic_read(&sdeb_inject_pending))) {
3715 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3716 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3717 atomic_set(&sdeb_inject_pending, 0);
3718 ret = check_condition_result;
3719 goto err_out_unlock;
3720 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3721 /* Logical block guard check failed */
3722 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3723 atomic_set(&sdeb_inject_pending, 0);
3724 ret = illegal_condition_result;
3725 goto err_out_unlock;
3726 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3727 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3728 atomic_set(&sdeb_inject_pending, 0);
3729 ret = illegal_condition_result;
3730 goto err_out_unlock;
err_out_unlock:
3738 write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
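/*
 * Sketch of one 32 byte LBA range descriptor consumed by the loop
 * above: a 64 bit LBA at offset 0 and a 32 bit block count at offset 8
 * (the 32 byte CDB variant also carries the expected initial reference
 * tag at offset 12).
 */
static void __maybe_unused sdeb_example_lrd(const u8 *up, u64 *lba, u32 *num)
{
	*lba = get_unaligned_be64(up + 0);
	*num = get_unaligned_be32(up + 8);
}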
3744 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3745 u32 ei_lba, bool unmap, bool ndob)
3747 struct scsi_device *sdp = scp->device;
3748 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3749 unsigned long long i;
3751 u32 lb_size = sdebug_sector_size;
3753 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3754 scp->device->hostdata, true);
3755 rwlock_t *macc_lckp = &sip->macc_lck;
3759 write_lock(macc_lckp);
3761 ret = check_device_access_params(scp, lba, num, true);
3763 write_unlock(macc_lckp);
3767 if (unmap && scsi_debug_lbp()) {
3768 unmap_region(sip, lba, num);
3772 block = do_div(lbaa, sdebug_store_sectors);
3773 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3775 fs1p = fsp + (block * lb_size);
3777 memset(fs1p, 0, lb_size);
3780 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3783 write_unlock(&sip->macc_lck);
3784 return DID_ERROR << 16;
3785 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3786 sdev_printk(KERN_INFO, scp->device,
3787 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3788 my_name, "write same", lb_size, ret);
3790 /* Copy first sector to remaining blocks */
3791 for (i = 1 ; i < num ; i++) {
3793 block = do_div(lbaa, sdebug_store_sectors);
3794 memmove(fsp + (block * lb_size), fs1p, lb_size);
3796 if (scsi_debug_lbp())
3797 map_region(sip, lba, num);
3798 /* If ZBC zone then bump its write pointer */
3799 if (sdebug_dev_is_zoned(devip))
3800 zbc_inc_wp(devip, lba, num);
3802 write_unlock(macc_lckp);
3807 static int resp_write_same_10(struct scsi_cmnd *scp,
3808 struct sdebug_dev_info *devip)
3810 u8 *cmd = scp->cmnd;
3817 if (sdebug_lbpws10 == 0) {
3818 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3819 return check_condition_result;
3823 lba = get_unaligned_be32(cmd + 2);
3824 num = get_unaligned_be16(cmd + 7);
3825 if (num > sdebug_write_same_length) {
3826 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3827 return check_condition_result;
3829 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3832 static int resp_write_same_16(struct scsi_cmnd *scp,
3833 struct sdebug_dev_info *devip)
3835 u8 *cmd = scp->cmnd;
3842 if (cmd[1] & 0x8) { /* UNMAP */
3843 if (sdebug_lbpws == 0) {
3844 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3845 return check_condition_result;
3849 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3851 lba = get_unaligned_be64(cmd + 2);
3852 num = get_unaligned_be32(cmd + 10);
3853 if (num > sdebug_write_same_length) {
3854 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3855 return check_condition_result;
3857 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3860 /* Note the mode field is in the same position as the (lower) service action
3861 * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3862 * each mode of this command should be reported separately; left for the future. */
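/* Illustrative WRITE BUFFER CDB layout assumed here (per SPC): cmd[0] is the
 * opcode (0x3b), cmd[1] bits 4:0 are the mode, cmd[2] the buffer ID,
 * cmd[3..5] the buffer offset and cmd[6..8] the parameter list length.
 * Only the mode field is decoded below. */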
3863 static int resp_write_buffer(struct scsi_cmnd *scp,
3864 struct sdebug_dev_info *devip)
3866 u8 *cmd = scp->cmnd;
3867 struct scsi_device *sdp = scp->device;
3868 struct sdebug_dev_info *dp;
3871 mode = cmd[1] & 0x1f;
3873 case 0x4: /* download microcode (MC) and activate (ACT) */
3874 /* set UAs on this device only */
3875 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3876 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3878 case 0x5: /* download MC, save and ACT */
3879 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3881 case 0x6: /* download MC with offsets and ACT */
3882 /* set UAs on most devices (LUs) in this target */
3883 list_for_each_entry(dp,
3884 &devip->sdbg_host->dev_info_list,
3886 if (dp->target == sdp->id) {
3887 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3889 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3893 case 0x7: /* download MC with offsets, save, and ACT */
3894 /* set UA on all devices (LUs) in this target */
3895 list_for_each_entry(dp,
3896 &devip->sdbg_host->dev_info_list,
3898 if (dp->target == sdp->id)
3899 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3903 /* do nothing for this command for other mode values */
3909 static int resp_comp_write(struct scsi_cmnd *scp,
3910 struct sdebug_dev_info *devip)
3912 u8 *cmd = scp->cmnd;
3914 struct sdeb_store_info *sip = devip2sip(devip, true);
3915 rwlock_t *macc_lckp = &sip->macc_lck;
3918 u32 lb_size = sdebug_sector_size;
3923 lba = get_unaligned_be64(cmd + 2);
3924 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3926 return 0; /* degenerate case, not an error */
3927 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3929 mk_sense_invalid_opcode(scp);
3930 return check_condition_result;
3932 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3933 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3934 (cmd[1] & 0xe0) == 0)
3935 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3937 ret = check_device_access_params(scp, lba, num, false);
3941 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3943 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3945 return check_condition_result;
3948 write_lock(macc_lckp);
3950 ret = do_dout_fetch(scp, dnum, arr);
3952 retval = DID_ERROR << 16;
3954 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3955 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3956 "indicated=%u, IO sent=%d bytes\n", my_name,
3957 dnum * lb_size, ret);
3958 if (!comp_write_worker(sip, lba, num, arr, false)) {
3959 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3960 retval = check_condition_result;
3963 if (scsi_debug_lbp())
3964 map_region(sip, lba, num);
3966 write_unlock(macc_lckp);
3971 struct unmap_block_desc {
3977 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3980 struct unmap_block_desc *desc;
3981 struct sdeb_store_info *sip = devip2sip(devip, true);
3982 rwlock_t *macc_lckp = &sip->macc_lck;
3983 unsigned int i, payload_len, descriptors;
3986 if (!scsi_debug_lbp())
3987 return 0; /* fib and say it's done */
3988 payload_len = get_unaligned_be16(scp->cmnd + 7);
3989 BUG_ON(scsi_bufflen(scp) != payload_len);
3991 descriptors = (payload_len - 8) / 16;
3992 if (descriptors > sdebug_unmap_max_desc) {
3993 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3994 return check_condition_result;
3997 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3999 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4001 return check_condition_result;
4004 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4006 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4007 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4009 desc = (void *)&buf[8];
4011 write_lock(macc_lckp);
4013 for (i = 0 ; i < descriptors ; i++) {
4014 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4015 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4017 ret = check_device_access_params(scp, lba, num, true);
4021 unmap_region(sip, lba, num);
4027 write_unlock(macc_lckp);
4033 #define SDEBUG_GET_LBA_STATUS_LEN 32
4035 static int resp_get_lba_status(struct scsi_cmnd *scp,
4036 struct sdebug_dev_info *devip)
4038 u8 *cmd = scp->cmnd;
4040 u32 alloc_len, mapped, num;
4042 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4044 lba = get_unaligned_be64(cmd + 2);
4045 alloc_len = get_unaligned_be32(cmd + 10);
4050 ret = check_device_access_params(scp, lba, 1, false);
4054 if (scsi_debug_lbp()) {
4055 struct sdeb_store_info *sip = devip2sip(devip, true);
4057 mapped = map_state(sip, lba, &num);
4060 /* following just in case virtual_gb changed */
4061 sdebug_capacity = get_sdebug_capacity();
4062 if (sdebug_capacity - lba <= 0xffffffff)
4063 num = sdebug_capacity - lba;
4068 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4069 put_unaligned_be32(20, arr); /* Parameter Data Length */
4070 put_unaligned_be64(lba, arr + 8); /* LBA */
4071 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4072 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4074 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4077 static int resp_sync_cache(struct scsi_cmnd *scp,
4078 struct sdebug_dev_info *devip)
4083 u8 *cmd = scp->cmnd;
4085 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4086 lba = get_unaligned_be32(cmd + 2);
4087 num_blocks = get_unaligned_be16(cmd + 7);
4088 } else { /* SYNCHRONIZE_CACHE(16) */
4089 lba = get_unaligned_be64(cmd + 2);
4090 num_blocks = get_unaligned_be32(cmd + 10);
4092 if (lba + num_blocks > sdebug_capacity) {
4093 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4094 return check_condition_result;
4096 if (!write_since_sync || (cmd[1] & 0x2))
4097 res = SDEG_RES_IMMED_MASK;
4098 else /* delay if write_since_sync and IMMED clear */
4099 write_since_sync = false;
4104 * Assuming the LBA+num_blocks is not out-of-range, this function returns
4105 * CONDITION MET if the specified blocks fit (or will fit) in the cache, and
4106 * GOOD status otherwise. Model a disk with a big cache and always yield
4107 * CONDITION MET. It actually tries to bring the range of main memory into
4108 * the cache associated with the CPU(s).
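 * (Illustrative aside, assuming SBC opcode assignments: PRE-FETCH(10) is
 * opcode 0x34 and PRE-FETCH(16) is 0x90; the IMMED bit, cmd[1] bit 1,
 * requests that status be returned before the prefetch completes.)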
4110 static int resp_pre_fetch(struct scsi_cmnd *scp,
4111 struct sdebug_dev_info *devip)
4115 u64 block, rest = 0;
4117 u8 *cmd = scp->cmnd;
4118 struct sdeb_store_info *sip = devip2sip(devip, true);
4119 rwlock_t *macc_lckp = &sip->macc_lck;
4120 u8 *fsp = sip->storep;
4122 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4123 lba = get_unaligned_be32(cmd + 2);
4124 nblks = get_unaligned_be16(cmd + 7);
4125 } else { /* PRE-FETCH(16) */
4126 lba = get_unaligned_be64(cmd + 2);
4127 nblks = get_unaligned_be32(cmd + 10);
4129 if (lba + nblks > sdebug_capacity) {
4130 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4131 return check_condition_result;
4135 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4136 block = do_div(lba, sdebug_store_sectors);
4137 if (block + nblks > sdebug_store_sectors)
4138 rest = block + nblks - sdebug_store_sectors;
4140 /* Try to bring the PRE-FETCH range into CPU's cache */
4141 read_lock(macc_lckp);
4142 prefetch_range(fsp + (sdebug_sector_size * block),
4143 (nblks - rest) * sdebug_sector_size);
4145 prefetch_range(fsp, rest * sdebug_sector_size);
4146 read_unlock(macc_lckp);
4149 res = SDEG_RES_IMMED_MASK;
4150 return res | condition_met_result;
4153 #define RL_BUCKET_ELEMS 8
4155 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4156 * (W-LUN), the normal Linux scanning logic does not associate it with a
4157 * device (e.g. /dev/sg7). The following magic will make that association:
4158 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4159 * where <n> is a host number. If there are multiple targets in a host then
4160 * the above will associate a W-LUN with each target. To get a W-LUN for
4161 * target 2 only, use "echo '- 2 49409' > scan".
4163 static int resp_report_luns(struct scsi_cmnd *scp,
4164 struct sdebug_dev_info *devip)
4166 unsigned char *cmd = scp->cmnd;
4167 unsigned int alloc_len;
4168 unsigned char select_report;
4170 struct scsi_lun *lun_p;
4171 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4172 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4173 unsigned int wlun_cnt; /* report luns W-LUN count */
4174 unsigned int tlun_cnt; /* total LUN count */
4175 unsigned int rlen; /* response length (in bytes) */
4177 unsigned int off_rsp = 0;
4178 const int sz_lun = sizeof(struct scsi_lun);
4180 clear_luns_changed_on_target(devip);
4182 select_report = cmd[2];
4183 alloc_len = get_unaligned_be32(cmd + 6);
4185 if (alloc_len < 4) {
4186 pr_err("alloc len too small %d\n", alloc_len);
4187 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4188 return check_condition_result;
4191 switch (select_report) {
4192 case 0: /* all LUNs apart from W-LUNs */
4193 lun_cnt = sdebug_max_luns;
4196 case 1: /* only W-LUNs */
4200 case 2: /* all LUNs */
4201 lun_cnt = sdebug_max_luns;
4204 case 0x10: /* only administrative LUs */
4205 case 0x11: /* see SPC-5 */
4206 case 0x12: /* only subsidiary LUs owned by referenced LU */
4208 pr_debug("select report invalid %d\n", select_report);
4209 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4210 return check_condition_result;
4213 if (sdebug_no_lun_0 && (lun_cnt > 0))
4216 tlun_cnt = lun_cnt + wlun_cnt;
4217 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4218 scsi_set_resid(scp, scsi_bufflen(scp));
4219 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4220 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4222 /* loops below rely on the response header being the same size as a LUN entry (both 8 bytes) */
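/* Illustrative example of the bucket scheme: with 10 LUNs, rlen is 80 bytes;
 * the first 64 byte bucket carries the 8 byte header plus 7 LUN entries and
 * a second bucket carries the remaining 3 entries. */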
4223 lun = sdebug_no_lun_0 ? 1 : 0;
4224 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4225 memset(arr, 0, sizeof(arr));
4226 lun_p = (struct scsi_lun *)&arr[0];
4228 put_unaligned_be32(rlen, &arr[0]);
4232 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4233 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4235 int_to_scsilun(lun++, lun_p);
4236 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4237 lun_p->scsi_lun[0] |= 0x40;
4239 if (j < RL_BUCKET_ELEMS)
4242 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4248 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4252 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4256 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4258 bool is_bytchk3 = false;
4261 u32 vnum, a_num, off;
4262 const u32 lb_size = sdebug_sector_size;
4265 u8 *cmd = scp->cmnd;
4266 struct sdeb_store_info *sip = devip2sip(devip, true);
4267 rwlock_t *macc_lckp = &sip->macc_lck;
4269 bytchk = (cmd[1] >> 1) & 0x3;
4271 return 0; /* always claim internal verify okay */
4272 } else if (bytchk == 2) {
4273 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4274 return check_condition_result;
4275 } else if (bytchk == 3) {
4276 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4280 lba = get_unaligned_be64(cmd + 2);
4281 vnum = get_unaligned_be32(cmd + 10);
4283 case VERIFY: /* is VERIFY(10) */
4284 lba = get_unaligned_be32(cmd + 2);
4285 vnum = get_unaligned_be16(cmd + 7);
4288 mk_sense_invalid_opcode(scp);
4289 return check_condition_result;
4292 return 0; /* not an error */
4293 a_num = is_bytchk3 ? 1 : vnum;
4294 /* Treat following check like one for read (i.e. no write) access */
4295 ret = check_device_access_params(scp, lba, a_num, false);
4299 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4301 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4303 return check_condition_result;
4305 /* Not changing store, so only need read access */
4306 read_lock(macc_lckp);
4308 ret = do_dout_fetch(scp, a_num, arr);
4310 ret = DID_ERROR << 16;
4312 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4313 sdev_printk(KERN_INFO, scp->device,
4314 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4315 my_name, __func__, a_num * lb_size, ret);
4318 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4319 memcpy(arr + off, arr, lb_size);
4322 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4323 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4324 ret = check_condition_result;
4328 read_unlock(macc_lckp);
4333 #define RZONES_DESC_HD 64
4335 /* Report zones depending on start LBA and reporting options */
4336 static int resp_report_zones(struct scsi_cmnd *scp,
4337 struct sdebug_dev_info *devip)
4339 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4341 u32 alloc_len, rep_opts, rep_len;
4344 u8 *arr = NULL, *desc;
4345 u8 *cmd = scp->cmnd;
4346 struct sdeb_zone_state *zsp;
4347 struct sdeb_store_info *sip = devip2sip(devip, false);
4348 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4350 if (!sdebug_dev_is_zoned(devip)) {
4351 mk_sense_invalid_opcode(scp);
4352 return check_condition_result;
4354 zs_lba = get_unaligned_be64(cmd + 2);
4355 alloc_len = get_unaligned_be32(cmd + 10);
4357 return 0; /* not an error */
4358 rep_opts = cmd[14] & 0x3f;
4359 partial = cmd[14] & 0x80;
4361 if (zs_lba >= sdebug_capacity) {
4362 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4363 return check_condition_result;
4366 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4367 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4370 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4372 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4374 return check_condition_result;
4377 read_lock(macc_lckp);
4380 for (i = 0; i < max_zones; i++) {
4381 lba = zs_lba + devip->zsize * i;
4382 if (lba > sdebug_capacity)
4384 zsp = zbc_zone(devip, lba);
4391 if (zsp->z_cond != ZC1_EMPTY)
4395 /* Implicit open zones */
4396 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4400 /* Explicit open zones */
4401 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4406 if (zsp->z_cond != ZC4_CLOSED)
4411 if (zsp->z_cond != ZC5_FULL)
4418 * Read-only, offline, reset WP recommended are
4419 * not emulated: no zones to report;
4423 /* non-seq-resource set */
4424 if (!zsp->z_non_seq_resource)
4428 /* Not write pointer (conventional) zones */
4429 if (!zbc_zone_is_conv(zsp))
4433 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4434 INVALID_FIELD_IN_CDB, 0);
4435 ret = check_condition_result;
4439 if (nrz < rep_max_zones) {
4440 /* Fill zone descriptor */
4441 desc[0] = zsp->z_type;
4442 desc[1] = zsp->z_cond << 4;
4443 if (zsp->z_non_seq_resource)
4445 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4446 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4447 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4451 if (partial && nrz >= rep_max_zones)
4458 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4459 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4461 rep_len = (unsigned long)desc - (unsigned long)arr;
4462 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4465 read_unlock(macc_lckp);
4470 /* Logic transplanted from tcmu-runner, file_zbc.c */
4471 static void zbc_open_all(struct sdebug_dev_info *devip)
4473 struct sdeb_zone_state *zsp = &devip->zstate[0];
4476 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4477 if (zsp->z_cond == ZC4_CLOSED)
4478 zbc_open_zone(devip, &devip->zstate[i], true);
4482 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4486 enum sdebug_z_cond zc;
4487 u8 *cmd = scp->cmnd;
4488 struct sdeb_zone_state *zsp;
4489 bool all = cmd[14] & 0x01;
4490 struct sdeb_store_info *sip = devip2sip(devip, false);
4491 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4493 if (!sdebug_dev_is_zoned(devip)) {
4494 mk_sense_invalid_opcode(scp);
4495 return check_condition_result;
4498 write_lock(macc_lckp);
4501 /* Check if all closed zones can be opened */
4502 if (devip->max_open &&
4503 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4504 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4506 res = check_condition_result;
4509 /* Open all closed zones */
4510 zbc_open_all(devip);
4514 /* Open the specified zone */
4515 z_id = get_unaligned_be64(cmd + 2);
4516 if (z_id >= sdebug_capacity) {
4517 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4518 res = check_condition_result;
4522 zsp = zbc_zone(devip, z_id);
4523 if (z_id != zsp->z_start) {
4524 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4525 res = check_condition_result;
4528 if (zbc_zone_is_conv(zsp)) {
4529 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4530 res = check_condition_result;
4535 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4538 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4539 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4541 res = check_condition_result;
4545 zbc_open_zone(devip, zsp, true);
4547 write_unlock(macc_lckp);
4551 static void zbc_close_all(struct sdebug_dev_info *devip)
4555 for (i = 0; i < devip->nr_zones; i++)
4556 zbc_close_zone(devip, &devip->zstate[i]);
4559 static int resp_close_zone(struct scsi_cmnd *scp,
4560 struct sdebug_dev_info *devip)
4564 u8 *cmd = scp->cmnd;
4565 struct sdeb_zone_state *zsp;
4566 bool all = cmd[14] & 0x01;
4567 struct sdeb_store_info *sip = devip2sip(devip, false);
4568 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4570 if (!sdebug_dev_is_zoned(devip)) {
4571 mk_sense_invalid_opcode(scp);
4572 return check_condition_result;
4575 write_lock(macc_lckp);
4578 zbc_close_all(devip);
4582 /* Close specified zone */
4583 z_id = get_unaligned_be64(cmd + 2);
4584 if (z_id >= sdebug_capacity) {
4585 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4586 res = check_condition_result;
4590 zsp = zbc_zone(devip, z_id);
4591 if (z_id != zsp->z_start) {
4592 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4593 res = check_condition_result;
4596 if (zbc_zone_is_conv(zsp)) {
4597 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4598 res = check_condition_result;
4602 zbc_close_zone(devip, zsp);
4604 write_unlock(macc_lckp);
4608 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4609 struct sdeb_zone_state *zsp, bool empty)
4611 enum sdebug_z_cond zc = zsp->z_cond;
4613 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4614 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4615 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4616 zbc_close_zone(devip, zsp);
4617 if (zsp->z_cond == ZC4_CLOSED)
4619 zsp->z_wp = zsp->z_start + zsp->z_size;
4620 zsp->z_cond = ZC5_FULL;
4624 static void zbc_finish_all(struct sdebug_dev_info *devip)
4628 for (i = 0; i < devip->nr_zones; i++)
4629 zbc_finish_zone(devip, &devip->zstate[i], false);
4632 static int resp_finish_zone(struct scsi_cmnd *scp,
4633 struct sdebug_dev_info *devip)
4635 struct sdeb_zone_state *zsp;
4638 u8 *cmd = scp->cmnd;
4639 bool all = cmd[14] & 0x01;
4640 struct sdeb_store_info *sip = devip2sip(devip, false);
4641 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4643 if (!sdebug_dev_is_zoned(devip)) {
4644 mk_sense_invalid_opcode(scp);
4645 return check_condition_result;
4648 write_lock(macc_lckp);
4651 zbc_finish_all(devip);
4655 /* Finish the specified zone */
4656 z_id = get_unaligned_be64(cmd + 2);
4657 if (z_id >= sdebug_capacity) {
4658 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4659 res = check_condition_result;
4663 zsp = zbc_zone(devip, z_id);
4664 if (z_id != zsp->z_start) {
4665 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4666 res = check_condition_result;
4669 if (zbc_zone_is_conv(zsp)) {
4670 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4671 res = check_condition_result;
4675 zbc_finish_zone(devip, zsp, true);
4677 write_unlock(macc_lckp);
4681 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4682 struct sdeb_zone_state *zsp)
4684 enum sdebug_z_cond zc;
4685 struct sdeb_store_info *sip = devip2sip(devip, false);
4687 if (zbc_zone_is_conv(zsp))
4691 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4692 zbc_close_zone(devip, zsp);
4694 if (zsp->z_cond == ZC4_CLOSED)
4697 if (zsp->z_wp > zsp->z_start)
4698 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4699 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4701 zsp->z_non_seq_resource = false;
4702 zsp->z_wp = zsp->z_start;
4703 zsp->z_cond = ZC1_EMPTY;
4706 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4710 for (i = 0; i < devip->nr_zones; i++)
4711 zbc_rwp_zone(devip, &devip->zstate[i]);
4714 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4716 struct sdeb_zone_state *zsp;
4719 u8 *cmd = scp->cmnd;
4720 bool all = cmd[14] & 0x01;
4721 struct sdeb_store_info *sip = devip2sip(devip, false);
4722 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4724 if (!sdebug_dev_is_zoned(devip)) {
4725 mk_sense_invalid_opcode(scp);
4726 return check_condition_result;
4729 write_lock(macc_lckp);
4736 z_id = get_unaligned_be64(cmd + 2);
4737 if (z_id >= sdebug_capacity) {
4738 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4739 res = check_condition_result;
4743 zsp = zbc_zone(devip, z_id);
4744 if (z_id != zsp->z_start) {
4745 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4746 res = check_condition_result;
4749 if (zbc_zone_is_conv(zsp)) {
4750 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4751 res = check_condition_result;
4755 zbc_rwp_zone(devip, zsp);
4757 write_unlock(macc_lckp);
4761 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4764 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4766 hwq = blk_mq_unique_tag_to_hwq(tag);
4768 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4769 if (WARN_ON_ONCE(hwq >= submit_queues))
4772 return sdebug_q_arr + hwq;
4775 static u32 get_tag(struct scsi_cmnd *cmnd)
4777 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4780 /* Queued (deferred) command completions converge here. */
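/* It is reached from sdebug_q_cmd_hrt_complete() (hrtimer callback) and
 * sdebug_q_cmd_wq_complete() (work queue) defined below. */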
4781 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4783 bool aborted = sd_dp->aborted;
4786 unsigned long iflags;
4787 struct sdebug_queue *sqp;
4788 struct sdebug_queued_cmd *sqcp;
4789 struct scsi_cmnd *scp;
4790 struct sdebug_dev_info *devip;
4792 if (unlikely(aborted))
4793 sd_dp->aborted = false;
4794 qc_idx = sd_dp->qc_idx;
4795 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4796 if (sdebug_statistics) {
4797 atomic_inc(&sdebug_completions);
4798 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4799 atomic_inc(&sdebug_miss_cpus);
4801 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4802 pr_err("wild qc_idx=%d\n", qc_idx);
4805 spin_lock_irqsave(&sqp->qc_lock, iflags);
4806 sd_dp->defer_t = SDEB_DEFER_NONE;
4807 sqcp = &sqp->qc_arr[qc_idx];
4809 if (unlikely(scp == NULL)) {
4810 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4811 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4812 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4815 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4817 atomic_dec(&devip->num_in_q);
4819 pr_err("devip=NULL\n");
4820 if (unlikely(atomic_read(&retired_max_queue) > 0))
4823 sqcp->a_cmnd = NULL;
4824 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4825 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4826 pr_err("Unexpected completion\n");
4830 if (unlikely(retiring)) { /* user has reduced max_queue */
4833 retval = atomic_read(&retired_max_queue);
4834 if (qc_idx >= retval) {
4835 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4836 pr_err("index %d too large\n", retval);
4839 k = find_last_bit(sqp->in_use_bm, retval);
4840 if ((k < sdebug_max_queue) || (k == retval))
4841 atomic_set(&retired_max_queue, 0);
4843 atomic_set(&retired_max_queue, k + 1);
4845 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4846 if (unlikely(aborted)) {
4848 pr_info("bypassing scsi_done() due to aborted cmd\n");
4851 scp->scsi_done(scp); /* callback to mid level */
4854 /* Called when the high resolution timer goes off. */
4855 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4857 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4859 sdebug_q_cmd_complete(sd_dp);
4860 return HRTIMER_NORESTART;
4863 /* Called when the work queue runs the scheduled (deferred) work. */
4864 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4866 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4868 sdebug_q_cmd_complete(sd_dp);
4871 static bool got_shared_uuid;
4872 static uuid_t shared_uuid;
4874 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4876 struct sdeb_zone_state *zsp;
4877 sector_t capacity = get_sdebug_capacity();
4878 sector_t zstart = 0;
4882 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4883 * a zone size allowing for at least 4 zones on the device. Otherwise,
4884 * use the specified zone size, checking that at least 2 zones can be
4885 * created for the device.
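 *
 * Worked example (illustrative, assuming DEF_ZBC_ZONE_SIZE_MB is 128 and a
 * 512 byte logical block size): zsize = (128 * SZ_1M) >> 9 = 262144 blocks,
 * i.e. each zone covers 128 MiB of the store.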
4887 if (!sdeb_zbc_zone_size_mb) {
4888 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4889 >> ilog2(sdebug_sector_size);
4890 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4892 if (devip->zsize < 2) {
4893 pr_err("Device capacity too small\n");
4897 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4898 pr_err("Zone size is not a power of 2\n");
4901 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4902 >> ilog2(sdebug_sector_size);
4903 if (devip->zsize >= capacity) {
4904 pr_err("Zone size too large for device capacity\n");
4909 devip->zsize_shift = ilog2(devip->zsize);
4910 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4912 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4913 pr_err("Number of conventional zones too large\n");
4916 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4918 if (devip->zmodel == BLK_ZONED_HM) {
4919 /* zbc_max_open_zones can be 0, meaning "not reported" */
4920 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4921 devip->max_open = (devip->nr_zones - 1) / 2;
4923 devip->max_open = sdeb_zbc_max_open;
4926 devip->zstate = kcalloc(devip->nr_zones,
4927 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4931 for (i = 0; i < devip->nr_zones; i++) {
4932 zsp = &devip->zstate[i];
4934 zsp->z_start = zstart;
4936 if (i < devip->nr_conv_zones) {
4937 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4938 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4939 zsp->z_wp = (sector_t)-1;
4941 if (devip->zmodel == BLK_ZONED_HM)
4942 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4944 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4945 zsp->z_cond = ZC1_EMPTY;
4946 zsp->z_wp = zsp->z_start;
4949 if (zsp->z_start + devip->zsize < capacity)
4950 zsp->z_size = devip->zsize;
4952 zsp->z_size = capacity - zsp->z_start;
4954 zstart += zsp->z_size;
4960 static struct sdebug_dev_info *sdebug_device_create(
4961 struct sdebug_host_info *sdbg_host, gfp_t flags)
4963 struct sdebug_dev_info *devip;
4965 devip = kzalloc(sizeof(*devip), flags);
4967 if (sdebug_uuid_ctl == 1)
4968 uuid_gen(&devip->lu_name);
4969 else if (sdebug_uuid_ctl == 2) {
4970 if (got_shared_uuid)
4971 devip->lu_name = shared_uuid;
4973 uuid_gen(&shared_uuid);
4974 got_shared_uuid = true;
4975 devip->lu_name = shared_uuid;
4978 devip->sdbg_host = sdbg_host;
4979 if (sdeb_zbc_in_use) {
4980 devip->zmodel = sdeb_zbc_model;
4981 if (sdebug_device_create_zones(devip)) {
4986 devip->zmodel = BLK_ZONED_NONE;
4988 devip->sdbg_host = sdbg_host;
4989 devip->create_ts = ktime_get_boottime();
4990 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4991 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4996 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4998 struct sdebug_host_info *sdbg_host;
4999 struct sdebug_dev_info *open_devip = NULL;
5000 struct sdebug_dev_info *devip;
5002 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5004 pr_err("Host info NULL\n");
5008 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5009 if ((devip->used) && (devip->channel == sdev->channel) &&
5010 (devip->target == sdev->id) &&
5011 (devip->lun == sdev->lun))
5014 if ((!devip->used) && (!open_devip))
5018 if (!open_devip) { /* try and make a new one */
5019 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5021 pr_err("out of memory at line %d\n", __LINE__);
5026 open_devip->channel = sdev->channel;
5027 open_devip->target = sdev->id;
5028 open_devip->lun = sdev->lun;
5029 open_devip->sdbg_host = sdbg_host;
5030 atomic_set(&open_devip->num_in_q, 0);
5031 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5032 open_devip->used = true;
5036 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5039 pr_info("slave_alloc <%u %u %u %llu>\n",
5040 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5044 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5046 struct sdebug_dev_info *devip =
5047 (struct sdebug_dev_info *)sdp->hostdata;
5050 pr_info("slave_configure <%u %u %u %llu>\n",
5051 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5052 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5053 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5054 if (devip == NULL) {
5055 devip = find_build_dev_info(sdp);
5057 return 1; /* no resources, will be marked offline */
5059 sdp->hostdata = devip;
5061 sdp->no_uld_attach = 1;
5062 config_cdb_len(sdp);
5066 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5068 struct sdebug_dev_info *devip =
5069 (struct sdebug_dev_info *)sdp->hostdata;
5072 pr_info("slave_destroy <%u %u %u %llu>\n",
5073 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5075 /* make this slot available for re-use */
5076 devip->used = false;
5077 sdp->hostdata = NULL;
5081 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5082 enum sdeb_defer_type defer_t)
5086 if (defer_t == SDEB_DEFER_HRT)
5087 hrtimer_cancel(&sd_dp->hrt);
5088 else if (defer_t == SDEB_DEFER_WQ)
5089 cancel_work_sync(&sd_dp->ew.work);
5092 /* If @cmnd is found, deletes its timer or work queue and returns true; else
5094 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5096 unsigned long iflags;
5097 int j, k, qmax, r_qmax;
5098 enum sdeb_defer_type l_defer_t;
5099 struct sdebug_queue *sqp;
5100 struct sdebug_queued_cmd *sqcp;
5101 struct sdebug_dev_info *devip;
5102 struct sdebug_defer *sd_dp;
5104 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5105 spin_lock_irqsave(&sqp->qc_lock, iflags);
5106 qmax = sdebug_max_queue;
5107 r_qmax = atomic_read(&retired_max_queue);
5110 for (k = 0; k < qmax; ++k) {
5111 if (test_bit(k, sqp->in_use_bm)) {
5112 sqcp = &sqp->qc_arr[k];
5113 if (cmnd != sqcp->a_cmnd)
5116 devip = (struct sdebug_dev_info *)
5117 cmnd->device->hostdata;
5119 atomic_dec(&devip->num_in_q);
5120 sqcp->a_cmnd = NULL;
5121 sd_dp = sqcp->sd_dp;
5123 l_defer_t = sd_dp->defer_t;
5124 sd_dp->defer_t = SDEB_DEFER_NONE;
5126 l_defer_t = SDEB_DEFER_NONE;
5127 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5128 stop_qc_helper(sd_dp, l_defer_t);
5129 clear_bit(k, sqp->in_use_bm);
5133 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5138 /* Deletes (stops) timers or work queues of all queued commands */
5139 static void stop_all_queued(void)
5141 unsigned long iflags;
5143 enum sdeb_defer_type l_defer_t;
5144 struct sdebug_queue *sqp;
5145 struct sdebug_queued_cmd *sqcp;
5146 struct sdebug_dev_info *devip;
5147 struct sdebug_defer *sd_dp;
5149 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5150 spin_lock_irqsave(&sqp->qc_lock, iflags);
5151 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5152 if (test_bit(k, sqp->in_use_bm)) {
5153 sqcp = &sqp->qc_arr[k];
5154 if (sqcp->a_cmnd == NULL)
5156 devip = (struct sdebug_dev_info *)
5157 sqcp->a_cmnd->device->hostdata;
5159 atomic_dec(&devip->num_in_q);
5160 sqcp->a_cmnd = NULL;
5161 sd_dp = sqcp->sd_dp;
5163 l_defer_t = sd_dp->defer_t;
5164 sd_dp->defer_t = SDEB_DEFER_NONE;
5166 l_defer_t = SDEB_DEFER_NONE;
5167 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5168 stop_qc_helper(sd_dp, l_defer_t);
5169 clear_bit(k, sqp->in_use_bm);
5170 spin_lock_irqsave(&sqp->qc_lock, iflags);
5173 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5177 /* Free queued command memory on heap */
5178 static void free_all_queued(void)
5181 struct sdebug_queue *sqp;
5182 struct sdebug_queued_cmd *sqcp;
5184 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5185 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5186 sqcp = &sqp->qc_arr[k];
5193 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5199 ok = stop_queued_cmnd(SCpnt);
5200 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5201 sdev_printk(KERN_INFO, SCpnt->device,
5202 "%s: command%s found\n", __func__,
5208 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5211 if (SCpnt && SCpnt->device) {
5212 struct scsi_device *sdp = SCpnt->device;
5213 struct sdebug_dev_info *devip =
5214 (struct sdebug_dev_info *)sdp->hostdata;
5216 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5217 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5219 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5224 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5226 struct sdebug_host_info *sdbg_host;
5227 struct sdebug_dev_info *devip;
5228 struct scsi_device *sdp;
5229 struct Scsi_Host *hp;
5232 ++num_target_resets;
5235 sdp = SCpnt->device;
5238 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5239 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5243 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5245 list_for_each_entry(devip,
5246 &sdbg_host->dev_info_list,
5248 if (devip->target == sdp->id) {
5249 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5253 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5254 sdev_printk(KERN_INFO, sdp,
5255 "%s: %d device(s) found in target\n", __func__, k);
5260 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5262 struct sdebug_host_info *sdbg_host;
5263 struct sdebug_dev_info *devip;
5264 struct scsi_device *sdp;
5265 struct Scsi_Host *hp;
5269 if (!(SCpnt && SCpnt->device))
5271 sdp = SCpnt->device;
5272 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5273 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5276 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5278 list_for_each_entry(devip,
5279 &sdbg_host->dev_info_list,
5281 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5286 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5287 sdev_printk(KERN_INFO, sdp,
5288 "%s: %d device(s) found in host\n", __func__, k);
5293 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5295 struct sdebug_host_info *sdbg_host;
5296 struct sdebug_dev_info *devip;
5300 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5301 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5302 spin_lock(&sdebug_host_list_lock);
5303 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5304 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5306 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5310 spin_unlock(&sdebug_host_list_lock);
5312 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5313 sdev_printk(KERN_INFO, SCpnt->device,
5314 "%s: %d device(s) found\n", __func__, k);
5318 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5320 struct msdos_partition *pp;
5321 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5322 int sectors_per_part, num_sectors, k;
5323 int heads_by_sects, start_sec, end_sec;
5325 /* assume partition table already zeroed */
5326 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5328 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5329 sdebug_num_parts = SDEBUG_MAX_PARTS;
5330 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5332 num_sectors = (int)get_sdebug_capacity();
5333 sectors_per_part = (num_sectors - sdebug_sectors_per)
5335 heads_by_sects = sdebug_heads * sdebug_sectors_per;
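/* Illustrative CHS mapping with a hypothetical geometry of heads=64 and
 * sectors_per=32: heads_by_sects is 2048, so start_sec 4096 yields
 * cylinder 2, head 0, sector 1 (CHS sector numbers are 1 based). */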
5336 starts[0] = sdebug_sectors_per;
5337 max_part_secs = sectors_per_part;
5338 for (k = 1; k < sdebug_num_parts; ++k) {
5339 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5341 if (starts[k] - starts[k - 1] < max_part_secs)
5342 max_part_secs = starts[k] - starts[k - 1];
5344 starts[sdebug_num_parts] = num_sectors;
5345 starts[sdebug_num_parts + 1] = 0;
5347 ramp[510] = 0x55; /* magic partition markings */
5349 pp = (struct msdos_partition *)(ramp + 0x1be);
5350 for (k = 0; starts[k + 1]; ++k, ++pp) {
5351 start_sec = starts[k];
5352 end_sec = starts[k] + max_part_secs - 1;
5355 pp->cyl = start_sec / heads_by_sects;
5356 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5357 / sdebug_sectors_per;
5358 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5360 pp->end_cyl = end_sec / heads_by_sects;
5361 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5362 / sdebug_sectors_per;
5363 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5365 pp->start_sect = cpu_to_le32(start_sec);
5366 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5367 pp->sys_ind = 0x83; /* plain Linux partition */
5371 static void block_unblock_all_queues(bool block)
5374 struct sdebug_queue *sqp;
5376 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5377 atomic_set(&sqp->blocked, (int)block);
5380 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5381 * commands will be processed normally before triggers occur.
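 * For example (illustrative): with every_nth=100 and cmnd_count=250, the
 * count is rounded down to 200, so 99 further commands are processed
 * normally before the next trigger point is reached.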
5383 static void tweak_cmnd_count(void)
5387 modulo = abs(sdebug_every_nth);
5390 block_unblock_all_queues(true);
5391 count = atomic_read(&sdebug_cmnd_count);
5392 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5393 block_unblock_all_queues(false);
5396 static void clear_queue_stats(void)
5398 atomic_set(&sdebug_cmnd_count, 0);
5399 atomic_set(&sdebug_completions, 0);
5400 atomic_set(&sdebug_miss_cpus, 0);
5401 atomic_set(&sdebug_a_tsf, 0);
5404 static bool inject_on_this_cmd(void)
5406 if (sdebug_every_nth == 0)
5408 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5411 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5413 /* Complete the processing of the thread that queued a SCSI command to this
5414 * driver. It either completes the command by calling scsi_done() or
5415 * schedules an hrtimer or work queue and then returns 0. Returns
5416 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
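 * Scheduling summary (from the code below): delta_jiff == 0 responds in the
 * invocation thread; a positive delta_jiff or ndelay arms an hrtimer; a
 * negative delay defers completion to the work queue; polled (REQ_HIPRI)
 * requests are instead marked SDEB_DEFER_POLL.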
5418 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5420 int (*pfp)(struct scsi_cmnd *,
5421 struct sdebug_dev_info *),
5422 int delta_jiff, int ndelay)
5425 bool inject = false;
5426 bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
5427 int k, num_in_q, qdepth;
5428 unsigned long iflags;
5429 u64 ns_from_boot = 0;
5430 struct sdebug_queue *sqp;
5431 struct sdebug_queued_cmd *sqcp;
5432 struct scsi_device *sdp;
5433 struct sdebug_defer *sd_dp;
5435 if (unlikely(devip == NULL)) {
5436 if (scsi_result == 0)
5437 scsi_result = DID_NO_CONNECT << 16;
5438 goto respond_in_thread;
5442 if (delta_jiff == 0)
5443 goto respond_in_thread;
5445 sqp = get_queue(cmnd);
5446 spin_lock_irqsave(&sqp->qc_lock, iflags);
5447 if (unlikely(atomic_read(&sqp->blocked))) {
5448 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5449 return SCSI_MLQUEUE_HOST_BUSY;
5451 num_in_q = atomic_read(&devip->num_in_q);
5452 qdepth = cmnd->device->queue_depth;
5453 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5455 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5456 goto respond_in_thread;
5458 scsi_result = device_qfull_result;
5459 } else if (unlikely(sdebug_every_nth &&
5460 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5461 (scsi_result == 0))) {
5462 if ((num_in_q == (qdepth - 1)) &&
5463 (atomic_inc_return(&sdebug_a_tsf) >=
5464 abs(sdebug_every_nth))) {
5465 atomic_set(&sdebug_a_tsf, 0);
5467 scsi_result = device_qfull_result;
5471 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5472 if (unlikely(k >= sdebug_max_queue)) {
5473 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5475 goto respond_in_thread;
5476 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5477 scsi_result = device_qfull_result;
5478 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5479 sdev_printk(KERN_INFO, sdp,
5480 "%s: max_queue=%d exceeded, %s\n",
5481 __func__, sdebug_max_queue,
5482 (scsi_result ? "status: TASK SET FULL" :
5483 "report: host busy"));
5485 goto respond_in_thread;
5487 return SCSI_MLQUEUE_HOST_BUSY;
5489 set_bit(k, sqp->in_use_bm);
5490 atomic_inc(&devip->num_in_q);
5491 sqcp = &sqp->qc_arr[k];
5492 sqcp->a_cmnd = cmnd;
5493 cmnd->host_scribble = (unsigned char *)sqcp;
5494 sd_dp = sqcp->sd_dp;
5495 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5498 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5500 atomic_dec(&devip->num_in_q);
5501 clear_bit(k, sqp->in_use_bm);
5502 return SCSI_MLQUEUE_HOST_BUSY;
5509 /* Set the hostwide tag */
5510 if (sdebug_host_max_queue)
5511 sd_dp->hc_idx = get_tag(cmnd);
5514 ns_from_boot = ktime_get_boottime_ns();
5516 /* one of the resp_*() response functions is called here */
5517 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5518 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5519 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5520 delta_jiff = ndelay = 0;
5522 if (cmnd->result == 0 && scsi_result != 0)
5523 cmnd->result = scsi_result;
5524 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5525 if (atomic_read(&sdeb_inject_pending)) {
5526 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5527 atomic_set(&sdeb_inject_pending, 0);
5528 cmnd->result = check_condition_result;
5532 if (unlikely(sdebug_verbose && cmnd->result))
5533 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5534 __func__, cmnd->result);
5536 if (delta_jiff > 0 || ndelay > 0) {
5539 if (delta_jiff > 0) {
5540 u64 ns = jiffies_to_nsecs(delta_jiff);
5542 if (sdebug_random && ns < U32_MAX) {
5543 ns = prandom_u32_max((u32)ns);
5544 } else if (sdebug_random) {
5545 ns >>= 12; /* scale to 4 usec precision */
5546 if (ns < U32_MAX) /* over 4 hours max */
5547 ns = prandom_u32_max((u32)ns);
5550 kt = ns_to_ktime(ns);
5551 } else { /* ndelay has a 4.2 second max */
5552 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5554 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5555 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5557 if (kt <= d) { /* elapsed duration >= kt */
5558 spin_lock_irqsave(&sqp->qc_lock, iflags);
5559 sqcp->a_cmnd = NULL;
5560 atomic_dec(&devip->num_in_q);
5561 clear_bit(k, sqp->in_use_bm);
5562 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5565 /* call scsi_done() from this thread */
5566 cmnd->scsi_done(cmnd);
5569 /* otherwise reduce kt by elapsed time */
5574 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5575 spin_lock_irqsave(&sqp->qc_lock, iflags);
5576 if (!sd_dp->init_poll) {
5577 sd_dp->init_poll = true;
5578 sqcp->sd_dp = sd_dp;
5579 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5582 sd_dp->defer_t = SDEB_DEFER_POLL;
5583 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5585 if (!sd_dp->init_hrt) {
5586 sd_dp->init_hrt = true;
5587 sqcp->sd_dp = sd_dp;
5588 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5589 HRTIMER_MODE_REL_PINNED);
5590 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5591 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5594 sd_dp->defer_t = SDEB_DEFER_HRT;
5595 /* schedule the invocation of scsi_done() for a later time */
5596 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5598 if (sdebug_statistics)
5599 sd_dp->issuing_cpu = raw_smp_processor_id();
5600 } else { /* jdelay < 0, use work queue */
5601 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5602 atomic_read(&sdeb_inject_pending)))
5603 sd_dp->aborted = true;
5605 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5606 spin_lock_irqsave(&sqp->qc_lock, iflags);
5607 if (!sd_dp->init_poll) {
5608 sd_dp->init_poll = true;
5609 sqcp->sd_dp = sd_dp;
5610 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5613 sd_dp->defer_t = SDEB_DEFER_POLL;
5614 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5616 if (!sd_dp->init_wq) {
5617 sd_dp->init_wq = true;
5618 sqcp->sd_dp = sd_dp;
5619 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5621 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5623 sd_dp->defer_t = SDEB_DEFER_WQ;
5624 schedule_work(&sd_dp->ew.work);
5626 if (sdebug_statistics)
5627 sd_dp->issuing_cpu = raw_smp_processor_id();
5628 if (unlikely(sd_dp->aborted)) {
5629 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5630 scsi_cmd_to_rq(cmnd)->tag);
5631 blk_abort_request(scsi_cmd_to_rq(cmnd));
5632 atomic_set(&sdeb_inject_pending, 0);
5633 sd_dp->aborted = false;
5636 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5637 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5638 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5641 respond_in_thread: /* call back to mid-layer using invocation thread */
5642 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5643 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5644 if (cmnd->result == 0 && scsi_result != 0)
5645 cmnd->result = scsi_result;
5646 cmnd->scsi_done(cmnd);
5650 /* Note: The following macros create attribute files in the
5651 /sys/module/scsi_debug/parameters directory. Unfortunately this
5652 driver is unaware of changes made through those files and so cannot
5653 trigger auxiliary actions, as it can when the corresponding attribute
5654 in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
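   For example (illustrative): 'echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts'
   is handled by opts_store() below, which can also update derived state such
   as sdebug_verbose; writing via /sys/module/scsi_debug/parameters/opts cannot.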
5656 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5657 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5658 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5659 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5660 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5661 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5662 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5663 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5664 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5665 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5666 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5667 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5668 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5669 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5670 module_param_string(inq_product, sdebug_inq_product_id,
5671 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5672 module_param_string(inq_rev, sdebug_inq_product_rev,
5673 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5674 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5675 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5676 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5677 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5678 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5679 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5680 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5681 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5682 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5683 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5684 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5686 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5688 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5689 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5690 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5691 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5692 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5693 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5694 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5695 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5696 module_param_named(per_host_store, sdebug_per_host_store, bool,
5698 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5699 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5700 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5701 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5702 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5703 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5704 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5705 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5706 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5707 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5708 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5709 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5710 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5711 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5712 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5713 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5714 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5715 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5717 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5718 module_param_named(write_same_length, sdebug_write_same_length, int,
5720 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5721 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5722 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5723 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5725 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5726 MODULE_DESCRIPTION("SCSI debug adapter driver");
5727 MODULE_LICENSE("GPL");
5728 MODULE_VERSION(SDEBUG_VERSION);
5730 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5731 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5732 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5733 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5734 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5735 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5736 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5737 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5738 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5739 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5740 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5741 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5742 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5743 MODULE_PARM_DESC(host_max_queue,
5744 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5745 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5746 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5747 SDEBUG_VERSION "\")");
5748 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5749 MODULE_PARM_DESC(lbprz,
5750 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5751 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5752 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5753 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5754 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5755 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5756 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5757 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5758 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5759 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5760 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5761 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5762 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5763 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5764 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5765 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5766 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5767 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5768 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5769 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5770 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5771 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5772 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5773 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5774 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5775 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5776 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5777 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5778 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5779 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5780 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5781 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5782 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5783 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5784 MODULE_PARM_DESC(uuid_ctl,
5785 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5786 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5787 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5788 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5789 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5790 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5791 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5792 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5793 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
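/*
 * Illustrative invocation (parameter values are examples only): create a
 * host-managed ZBC device with 128 MiB zones and at most 8 open zones:
 *     modprobe scsi_debug zbc=managed zone_size_mb=128 zone_max_open=8
 */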
5795 #define SDEBUG_INFO_LEN 256
5796 static char sdebug_info[SDEBUG_INFO_LEN];
5798 static const char *scsi_debug_info(struct Scsi_Host *shp)
5802 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5803 my_name, SDEBUG_VERSION, sdebug_version_date);
5804 if (k >= (SDEBUG_INFO_LEN - 1))
5806 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5807 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5808 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5809 "statistics", (int)sdebug_statistics);
5813 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5814 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5819 int minLen = length > 15 ? 15 : length;
5821 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5823 memcpy(arr, buffer, minLen);
5825 if (1 != sscanf(arr, "%d", &opts))
5828 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5829 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5830 if (sdebug_every_nth != 0)
5835 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5836 * same for each scsi_debug host (if more than one). Some of the counters
5837 * output here are not atomic so they may be inaccurate on a busy system. */
5838 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5841 struct sdebug_queue *sqp;
5842 struct sdebug_host_info *sdhp;
5844 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5845 SDEBUG_VERSION, sdebug_version_date);
5846 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5847 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5848 sdebug_opts, sdebug_every_nth);
5849 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5850 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5851 sdebug_sector_size, "bytes");
5852 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5853 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5855 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5856 num_dev_resets, num_target_resets, num_bus_resets,
5858 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5859 dix_reads, dix_writes, dif_errors);
5860 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5862 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5863 atomic_read(&sdebug_cmnd_count),
5864 atomic_read(&sdebug_completions),
5865 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5866 atomic_read(&sdebug_a_tsf),
5867 atomic_read(&sdeb_mq_poll_count));
5869 seq_printf(m, "submit_queues=%d\n", submit_queues);
5870 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5871 seq_printf(m, " queue %d:\n", j);
5872 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5873 if (f != sdebug_max_queue) {
5874 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5875 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5876 "first,last bits", f, l);
5880 seq_printf(m, "this host_no=%d\n", host->host_no);
5881 if (!xa_empty(per_store_ap)) {
5884 unsigned long l_idx;
5885 struct sdeb_store_info *sip;
5887 seq_puts(m, "\nhost list:\n");
5889 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5891 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5892 sdhp->shost->host_no, idx);
5895 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5896 sdeb_most_recent_idx);
5898 xa_for_each(per_store_ap, l_idx, sip) {
5899 niu = xa_get_mark(per_store_ap, l_idx,
5900 SDEB_XA_NOT_IN_USE);
5902 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5903 (niu ? " not_in_use" : ""));
5910 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5912 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5914 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5915 * of delay is jiffies.
5917 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5922 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5924 if (sdebug_jdelay != jdelay) {
5926 struct sdebug_queue *sqp;
5928 block_unblock_all_queues(true);
5929 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5931 k = find_first_bit(sqp->in_use_bm,
5933 if (k != sdebug_max_queue) {
5934 res = -EBUSY; /* queued commands */
5939 sdebug_jdelay = jdelay;
5942 block_unblock_all_queues(false);
5948 static DRIVER_ATTR_RW(delay);
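/*
 * Illustrative sysfs usage: defer every response by 2 jiffies, then put
 * the delay back to 1 jiffy (the usual default). A write fails with -EBUSY
 * while commands are still queued, per delay_store() above:
 *
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/delay
 */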
5950 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5952 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5954 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5955 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5956 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5961 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5962 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5964 if (sdebug_ndelay != ndelay) {
5966 struct sdebug_queue *sqp;
5968 block_unblock_all_queues(true);
5969 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5971 k = find_first_bit(sqp->in_use_bm,
5973 if (k != sdebug_max_queue) {
5974 res = -EBUSY; /* queued commands */
5979 sdebug_ndelay = ndelay;
5980 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5983 block_unblock_all_queues(false);
5989 static DRIVER_ATTR_RW(ndelay);
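/*
 * Illustrative sysfs usage: respond after roughly 5 microseconds. A
 * non-zero ndelay overrides the jiffy based delay above (sdebug_jdelay
 * becomes JDELAY_OVERRIDDEN); writing 0 hands control back to 'delay':
 *
 *     echo 5000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 */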
5991 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5993 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5996 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6002 if (sscanf(buf, "%10s", work) == 1) {
6003 if (strncasecmp(work, "0x", 2) == 0) {
6004 if (kstrtoint(work + 2, 16, &opts) == 0)
6007 if (kstrtoint(work, 10, &opts) == 0)
6014 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6015 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6019 static DRIVER_ATTR_RW(opts);
6021 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6023 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6025 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6030 /* Cannot change from or to TYPE_ZBC with sysfs */
6031 if (sdebug_ptype == TYPE_ZBC)
6034 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6042 static DRIVER_ATTR_RW(ptype);
6044 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6046 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6048 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6053 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6059 static DRIVER_ATTR_RW(dsense);
6061 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6063 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6065 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6070 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6071 bool want_store = (n == 0);
6072 struct sdebug_host_info *sdhp;
6075 sdebug_fake_rw = (sdebug_fake_rw > 0);
6076 if (sdebug_fake_rw == n)
6077 return count; /* not transitioning so do nothing */
6079 if (want_store) { /* 1 --> 0 transition, set up store */
6080 if (sdeb_first_idx < 0) {
6081 idx = sdebug_add_store();
6085 idx = sdeb_first_idx;
6086 xa_clear_mark(per_store_ap, idx,
6087 SDEB_XA_NOT_IN_USE);
6089 /* make all hosts use same store */
6090 list_for_each_entry(sdhp, &sdebug_host_list,
6092 if (sdhp->si_idx != idx) {
6093 xa_set_mark(per_store_ap, sdhp->si_idx,
6094 SDEB_XA_NOT_IN_USE);
6098 sdeb_most_recent_idx = idx;
6099 } else { /* 0 --> 1 transition is trigger for shrink */
6100 sdebug_erase_all_stores(true /* apart from first */);
6107 static DRIVER_ATTR_RW(fake_rw);
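/*
 * Illustrative transitions for fake_rw. Writing 1 makes READs and WRITEs
 * succeed without touching any store (and shrinks away all but the first
 * store); writing 0 (re)creates a store if needed and points every host
 * at the same one, as fake_rw_store() above does:
 *
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw
 */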
6109 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6111 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6113 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6118 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6119 sdebug_no_lun_0 = n;
6124 static DRIVER_ATTR_RW(no_lun_0);
6126 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6128 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6130 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6135 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6136 sdebug_num_tgts = n;
6137 sdebug_max_tgts_luns();
6142 static DRIVER_ATTR_RW(num_tgts);
6144 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6146 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6148 static DRIVER_ATTR_RO(dev_size_mb);
6150 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6152 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6155 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6160 if (kstrtobool(buf, &v))
6163 sdebug_per_host_store = v;
6166 static DRIVER_ATTR_RW(per_host_store);
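/*
 * Illustrative usage: request a private backing store for each host added
 * from now on (takes effect when fake_rw=0; see add_host_store() below),
 * then add one such host:
 *
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */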
6168 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6170 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6172 static DRIVER_ATTR_RO(num_parts);
6174 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6176 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6178 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6184 if (sscanf(buf, "%10s", work) == 1) {
6185 if (strncasecmp(work, "0x", 2) == 0) {
6186 if (kstrtoint(work + 2, 16, &nth) == 0)
6187 goto every_nth_done;
6189 if (kstrtoint(work, 10, &nth) == 0)
6190 goto every_nth_done;
6196 sdebug_every_nth = nth;
6197 if (nth && !sdebug_statistics) {
6198 pr_info("every_nth needs statistics=1, setting it\n");
6199 sdebug_statistics = true;
6204 static DRIVER_ATTR_RW(every_nth);
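/*
 * Illustrative error injection pairing every_nth with opts. Assuming the
 * SDEBUG_OPT_TIMEOUT flag value of 0x4 (check the SDEBUG_OPT_* defines
 * earlier in this file), the following times out every 100th command;
 * as noted above, setting every_nth turns statistics on automatically:
 *
 *     echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *     echo 0x4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */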
6206 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6208 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6210 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6216 if (kstrtoint(buf, 0, &n))
6219 if (n > (int)SAM_LUN_AM_FLAT) {
6220 pr_warn("only LUN address methods 0 and 1 are supported\n");
6223 changed = ((int)sdebug_lun_am != n);
6225 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6226 struct sdebug_host_info *sdhp;
6227 struct sdebug_dev_info *dp;
6229 spin_lock(&sdebug_host_list_lock);
6230 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6231 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6232 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6235 spin_unlock(&sdebug_host_list_lock);
6241 static DRIVER_ATTR_RW(lun_format);
6243 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6245 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6247 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6253 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6255 pr_warn("max_luns can be no more than 256\n");
6258 changed = (sdebug_max_luns != n);
6259 sdebug_max_luns = n;
6260 sdebug_max_tgts_luns();
6261 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6262 struct sdebug_host_info *sdhp;
6263 struct sdebug_dev_info *dp;
6265 spin_lock(&sdebug_host_list_lock);
6266 list_for_each_entry(sdhp, &sdebug_host_list,
6268 list_for_each_entry(dp, &sdhp->dev_info_list,
6270 set_bit(SDEBUG_UA_LUNS_CHANGED,
6274 spin_unlock(&sdebug_host_list_lock);
6280 static DRIVER_ATTR_RW(max_luns);
6282 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6284 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6286 /* N.B. max_queue can be changed while there are queued commands. In flight
6287 * commands beyond the new max_queue will be completed. */
6288 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6292 struct sdebug_queue *sqp;
6294 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6295 (n <= SDEBUG_CANQUEUE) &&
6296 (sdebug_host_max_queue == 0)) {
6297 block_unblock_all_queues(true);
6299 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6301 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6305 sdebug_max_queue = n;
6306 if (k == SDEBUG_CANQUEUE)
6307 atomic_set(&retired_max_queue, 0);
6309 atomic_set(&retired_max_queue, k + 1);
6311 atomic_set(&retired_max_queue, 0);
6312 block_unblock_all_queues(false);
6317 static DRIVER_ATTR_RW(max_queue);
6319 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6321 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6325 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6326 * in range [0, sdebug_host_max_queue), we can't change it.
6328 static DRIVER_ATTR_RO(host_max_queue);
6330 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6332 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6334 static DRIVER_ATTR_RO(no_uld);
6336 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6338 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6340 static DRIVER_ATTR_RO(scsi_level);
6342 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6344 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6346 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6352 /* Ignore capacity change for ZBC drives for now */
6353 if (sdeb_zbc_in_use)
6356 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6357 changed = (sdebug_virtual_gb != n);
6358 sdebug_virtual_gb = n;
6359 sdebug_capacity = get_sdebug_capacity();
6361 struct sdebug_host_info *sdhp;
6362 struct sdebug_dev_info *dp;
6364 spin_lock(&sdebug_host_list_lock);
6365 list_for_each_entry(sdhp, &sdebug_host_list,
6367 list_for_each_entry(dp, &sdhp->dev_info_list,
6369 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6373 spin_unlock(&sdebug_host_list_lock);
6379 static DRIVER_ATTR_RW(virtual_gb);
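/*
 * Illustrative usage: advertise a 4 GiB capacity while keeping the ramdisk
 * at dev_size_mb; accesses beyond the store size wrap within it. Ignored
 * for ZBC devices, per virtual_gb_store() above:
 *
 *     echo 4 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 */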
6381 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6383 /* show the absolute number of hosts currently active */
6384 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6387 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6392 struct sdeb_store_info *sip;
6393 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6396 if (sscanf(buf, "%d", &delta_hosts) != 1)
6398 if (delta_hosts > 0) {
6402 xa_for_each_marked(per_store_ap, idx, sip,
6403 SDEB_XA_NOT_IN_USE) {
6404 sdeb_most_recent_idx = (int)idx;
6408 if (found) /* re-use case */
6409 sdebug_add_host_helper((int)idx);
6411 sdebug_do_add_host(true);
6413 sdebug_do_add_host(false);
6415 } while (--delta_hosts);
6416 } else if (delta_hosts < 0) {
6418 sdebug_do_remove_host(false);
6419 } while (++delta_hosts);
6423 static DRIVER_ATTR_RW(add_host);
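/*
 * Illustrative usage: the value written to add_host is a delta, so hosts
 * can be added and removed at runtime:
 *
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */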
6425 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6427 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6429 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6434 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6435 sdebug_vpd_use_hostno = n;
6440 static DRIVER_ATTR_RW(vpd_use_hostno);
6442 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6444 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6446 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6451 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6453 sdebug_statistics = true;
6455 clear_queue_stats();
6456 sdebug_statistics = false;
6462 static DRIVER_ATTR_RW(statistics);
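/*
 * Illustrative usage: writing 0 also clears the queue statistics, so the
 * counters can be zeroed and re-armed before an experiment, then read
 * back through the proc interface:
 *
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/statistics
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/statistics
 *     cat /proc/scsi/scsi_debug/<host_id>
 */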
6464 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6466 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6468 static DRIVER_ATTR_RO(sector_size);
6470 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6472 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6474 static DRIVER_ATTR_RO(submit_queues);
6476 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6478 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6480 static DRIVER_ATTR_RO(dix);
6482 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6484 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6486 static DRIVER_ATTR_RO(dif);
6488 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6490 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6492 static DRIVER_ATTR_RO(guard);
6494 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6496 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6498 static DRIVER_ATTR_RO(ato);
6500 static ssize_t map_show(struct device_driver *ddp, char *buf)
6504 if (!scsi_debug_lbp())
6505 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6506 sdebug_store_sectors);
6508 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6509 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6512 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6513 (int)map_size, sip->map_storep);
6515 buf[count++] = '\n';
6520 static DRIVER_ATTR_RO(map);
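/*
 * Illustrative output of the map attribute when logical block provisioning
 * is enabled (e.g. with the lbpu=1 module parameter): the %pbl format
 * prints the mapped provisioning blocks as a bitmap range list, for
 * example "0-1,64,128-159". Read it with:
 *
 *     cat /sys/bus/pseudo/drivers/scsi_debug/map
 */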
6522 static ssize_t random_show(struct device_driver *ddp, char *buf)
6524 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6527 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6532 if (kstrtobool(buf, &v))
6538 static DRIVER_ATTR_RW(random);
6540 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6542 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6544 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6549 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6550 sdebug_removable = (n > 0);
6555 static DRIVER_ATTR_RW(removable);
6557 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6559 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6561 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6562 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6567 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6568 sdebug_host_lock = (n > 0);
6573 static DRIVER_ATTR_RW(host_lock);
6575 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6577 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6579 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6584 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6585 sdebug_strict = (n > 0);
6590 static DRIVER_ATTR_RW(strict);
6592 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6594 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6596 static DRIVER_ATTR_RO(uuid_ctl);
6598 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6600 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6602 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6607 ret = kstrtoint(buf, 0, &n);
6611 all_config_cdb_len();
6614 static DRIVER_ATTR_RW(cdb_len);
6616 static const char * const zbc_model_strs_a[] = {
6617 [BLK_ZONED_NONE] = "none",
6618 [BLK_ZONED_HA] = "host-aware",
6619 [BLK_ZONED_HM] = "host-managed",
6622 static const char * const zbc_model_strs_b[] = {
6623 [BLK_ZONED_NONE] = "no",
6624 [BLK_ZONED_HA] = "aware",
6625 [BLK_ZONED_HM] = "managed",
6628 static const char * const zbc_model_strs_c[] = {
6629 [BLK_ZONED_NONE] = "0",
6630 [BLK_ZONED_HA] = "1",
6631 [BLK_ZONED_HM] = "2",
6634 static int sdeb_zbc_model_str(const char *cp)
6636 int res = sysfs_match_string(zbc_model_strs_a, cp);
6639 res = sysfs_match_string(zbc_model_strs_b, cp);
6641 res = sysfs_match_string(zbc_model_strs_c, cp);
6649 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6651 return scnprintf(buf, PAGE_SIZE, "%s\n",
6652 zbc_model_strs_a[sdeb_zbc_model]);
6654 static DRIVER_ATTR_RO(zbc);
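/*
 * Illustrative ZBC setup: emulate a host-managed zoned disk with 128 MiB
 * zones and no conventional zones (zbc accepts the model strings matched
 * by sdeb_zbc_model_str() above):
 *
 *     modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=0
 */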
6656 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6658 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6660 static DRIVER_ATTR_RO(tur_ms_to_ready);
6662 /* Note: The following array creates attribute files in the
6663 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6664 files (over those found in the /sys/module/scsi_debug/parameters
6665 directory) is that auxiliary actions can be triggered when an attribute
6666 is changed. For example, see add_host_store() above.
6669 static struct attribute *sdebug_drv_attrs[] = {
6670 &driver_attr_delay.attr,
6671 &driver_attr_opts.attr,
6672 &driver_attr_ptype.attr,
6673 &driver_attr_dsense.attr,
6674 &driver_attr_fake_rw.attr,
6675 &driver_attr_host_max_queue.attr,
6676 &driver_attr_no_lun_0.attr,
6677 &driver_attr_num_tgts.attr,
6678 &driver_attr_dev_size_mb.attr,
6679 &driver_attr_num_parts.attr,
6680 &driver_attr_every_nth.attr,
6681 &driver_attr_lun_format.attr,
6682 &driver_attr_max_luns.attr,
6683 &driver_attr_max_queue.attr,
6684 &driver_attr_no_uld.attr,
6685 &driver_attr_scsi_level.attr,
6686 &driver_attr_virtual_gb.attr,
6687 &driver_attr_add_host.attr,
6688 &driver_attr_per_host_store.attr,
6689 &driver_attr_vpd_use_hostno.attr,
6690 &driver_attr_sector_size.attr,
6691 &driver_attr_statistics.attr,
6692 &driver_attr_submit_queues.attr,
6693 &driver_attr_dix.attr,
6694 &driver_attr_dif.attr,
6695 &driver_attr_guard.attr,
6696 &driver_attr_ato.attr,
6697 &driver_attr_map.attr,
6698 &driver_attr_random.attr,
6699 &driver_attr_removable.attr,
6700 &driver_attr_host_lock.attr,
6701 &driver_attr_ndelay.attr,
6702 &driver_attr_strict.attr,
6703 &driver_attr_uuid_ctl.attr,
6704 &driver_attr_cdb_len.attr,
6705 &driver_attr_tur_ms_to_ready.attr,
6706 &driver_attr_zbc.attr,
6709 ATTRIBUTE_GROUPS(sdebug_drv);
6711 static struct device *pseudo_primary;
6713 static int __init scsi_debug_init(void)
6715 bool want_store = (sdebug_fake_rw == 0);
6717 int k, ret, hosts_to_add;
6720 ramdisk_lck_a[0] = &atomic_rw;
6721 ramdisk_lck_a[1] = &atomic_rw2;
6722 atomic_set(&retired_max_queue, 0);
6724 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6725 pr_warn("ndelay must be less than 1 second, ignored\n");
6727 } else if (sdebug_ndelay > 0)
6728 sdebug_jdelay = JDELAY_OVERRIDDEN;
6730 switch (sdebug_sector_size) {
6737 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6741 switch (sdebug_dif) {
6742 case T10_PI_TYPE0_PROTECTION:
6744 case T10_PI_TYPE1_PROTECTION:
6745 case T10_PI_TYPE2_PROTECTION:
6746 case T10_PI_TYPE3_PROTECTION:
6747 have_dif_prot = true;
6751 pr_err("dif must be 0, 1, 2 or 3\n");
6755 if (sdebug_num_tgts < 0) {
6756 pr_err("num_tgts must be >= 0\n");
6760 if (sdebug_guard > 1) {
6761 pr_err("guard must be 0 or 1\n");
6765 if (sdebug_ato > 1) {
6766 pr_err("ato must be 0 or 1\n");
6770 if (sdebug_physblk_exp > 15) {
6771 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6775 sdebug_lun_am = sdebug_lun_am_i;
6776 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6777 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6778 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6781 if (sdebug_max_luns > 256) {
6782 if (sdebug_max_luns > 16384) {
6783 pr_warn("max_luns can be no more than 16384, using default\n");
6784 sdebug_max_luns = DEF_MAX_LUNS;
6786 sdebug_lun_am = SAM_LUN_AM_FLAT;
6789 if (sdebug_lowest_aligned > 0x3fff) {
6790 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6794 if (submit_queues < 1) {
6795 pr_err("submit_queues must be 1 or more\n");
6799 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6800 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6804 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6805 (sdebug_host_max_queue < 0)) {
6806 pr_err("host_max_queue must be in range [0, %d]\n",
6811 if (sdebug_host_max_queue &&
6812 (sdebug_max_queue != sdebug_host_max_queue)) {
6813 sdebug_max_queue = sdebug_host_max_queue;
6814 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6818 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6820 if (sdebug_q_arr == NULL)
6822 for (k = 0; k < submit_queues; ++k)
6823 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6826 * check for host managed zoned block device specified with
6827 * ptype=0x14 or zbc=XXX.
6829 if (sdebug_ptype == TYPE_ZBC) {
6830 sdeb_zbc_model = BLK_ZONED_HM;
6831 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6832 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6838 switch (sdeb_zbc_model) {
6839 case BLK_ZONED_NONE:
6841 sdebug_ptype = TYPE_DISK;
6844 sdebug_ptype = TYPE_ZBC;
6847 pr_err("Invalid ZBC model\n");
6852 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6853 sdeb_zbc_in_use = true;
6854 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6855 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6858 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6859 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6860 if (sdebug_dev_size_mb < 1)
6861 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6862 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6863 sdebug_store_sectors = sz / sdebug_sector_size;
6864 sdebug_capacity = get_sdebug_capacity();
6866 /* play around with geometry, don't waste too much on track 0 */
6868 sdebug_sectors_per = 32;
6869 if (sdebug_dev_size_mb >= 256)
6871 else if (sdebug_dev_size_mb >= 16)
6873 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6874 (sdebug_sectors_per * sdebug_heads);
6875 if (sdebug_cylinders_per >= 1024) {
6876 /* other LLDs do this; implies >= 1GB ram disk ... */
6878 sdebug_sectors_per = 63;
6879 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6880 (sdebug_sectors_per * sdebug_heads);
6882 if (scsi_debug_lbp()) {
6883 sdebug_unmap_max_blocks =
6884 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6886 sdebug_unmap_max_desc =
6887 clamp(sdebug_unmap_max_desc, 0U, 256U);
6889 sdebug_unmap_granularity =
6890 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6892 if (sdebug_unmap_alignment &&
6893 sdebug_unmap_granularity <=
6894 sdebug_unmap_alignment) {
6895 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6900 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6902 idx = sdebug_add_store();
6909 pseudo_primary = root_device_register("pseudo_0");
6910 if (IS_ERR(pseudo_primary)) {
6911 pr_warn("root_device_register() error\n");
6912 ret = PTR_ERR(pseudo_primary);
6915 ret = bus_register(&pseudo_lld_bus);
6917 pr_warn("bus_register error: %d\n", ret);
6920 ret = driver_register(&sdebug_driverfs_driver);
6922 pr_warn("driver_register error: %d\n", ret);
6926 hosts_to_add = sdebug_add_host;
6927 sdebug_add_host = 0;
6929 for (k = 0; k < hosts_to_add; k++) {
6930 if (want_store && k == 0) {
6931 ret = sdebug_add_host_helper(idx);
6933 pr_err("add_host_helper k=%d, error=%d\n",
6938 ret = sdebug_do_add_host(want_store &&
6939 sdebug_per_host_store);
6941 pr_err("add_host k=%d error=%d\n", k, -ret);
6947 pr_info("built %d host(s)\n", sdebug_num_hosts);
6952 bus_unregister(&pseudo_lld_bus);
6954 root_device_unregister(pseudo_primary);
6956 sdebug_erase_store(idx, NULL);
6958 kfree(sdebug_q_arr);
6962 static void __exit scsi_debug_exit(void)
6964 int k = sdebug_num_hosts;
6968 sdebug_do_remove_host(true);
6970 driver_unregister(&sdebug_driverfs_driver);
6971 bus_unregister(&pseudo_lld_bus);
6972 root_device_unregister(pseudo_primary);
6974 sdebug_erase_all_stores(false);
6975 xa_destroy(per_store_ap);
6976 kfree(sdebug_q_arr);
6979 device_initcall(scsi_debug_init);
6980 module_exit(scsi_debug_exit);
6982 static void sdebug_release_adapter(struct device *dev)
6984 struct sdebug_host_info *sdbg_host;
6986 sdbg_host = to_sdebug_host(dev);
6990 /* idx must be valid; if sip is NULL then it will be looked up using idx */
6991 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6996 if (xa_empty(per_store_ap))
6998 sip = xa_load(per_store_ap, idx);
7002 vfree(sip->map_storep);
7003 vfree(sip->dif_storep);
7005 xa_erase(per_store_ap, idx);
7009 /* Assume apart_from_first==false only in shutdown case. */
7010 static void sdebug_erase_all_stores(bool apart_from_first)
7013 struct sdeb_store_info *sip = NULL;
7015 xa_for_each(per_store_ap, idx, sip) {
7016 if (apart_from_first)
7017 apart_from_first = false;
7019 sdebug_erase_store(idx, sip);
7021 if (apart_from_first)
7022 sdeb_most_recent_idx = sdeb_first_idx;
7026 * Returns the new element's index (idx) in the store xarray if >= 0,
7027 * else a negated errno. Limit the number of stores to 65536.
7029 static int sdebug_add_store(void)
7033 unsigned long iflags;
7034 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7035 struct sdeb_store_info *sip = NULL;
7036 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7038 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7042 xa_lock_irqsave(per_store_ap, iflags);
7043 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7044 if (unlikely(res < 0)) {
7045 xa_unlock_irqrestore(per_store_ap, iflags);
7047 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7050 sdeb_most_recent_idx = n_idx;
7051 if (sdeb_first_idx < 0)
7052 sdeb_first_idx = n_idx;
7053 xa_unlock_irqrestore(per_store_ap, iflags);
7056 sip->storep = vzalloc(sz);
7058 pr_err("user data oom\n");
7061 if (sdebug_num_parts > 0)
7062 sdebug_build_parts(sip->storep, sz);
7064 /* DIF/DIX: what T10 calls Protection Information (PI) */
7068 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7069 sip->dif_storep = vmalloc(dif_size);
7071 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7074 if (!sip->dif_storep) {
7075 pr_err("DIX oom\n");
7078 memset(sip->dif_storep, 0xff, dif_size);
7080 /* Logical Block Provisioning */
7081 if (scsi_debug_lbp()) {
7082 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7083 sip->map_storep = vmalloc(array_size(sizeof(long),
7084 BITS_TO_LONGS(map_size)));
7086 pr_info("%lu provisioning blocks\n", map_size);
7088 if (!sip->map_storep) {
7089 pr_err("LBP map oom\n");
7093 bitmap_zero(sip->map_storep, map_size);
7095 /* Map first 1KB for partition table */
7096 if (sdebug_num_parts)
7097 map_region(sip, 0, 2);
7100 rwlock_init(&sip->macc_lck);
7103 sdebug_erase_store((int)n_idx, sip);
7104 pr_warn("%s: failed, errno=%d\n", __func__, -res);
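/*
 * The store bookkeeping above follows a common xarray idiom: allocate an
 * index with __xa_alloc() while holding the xarray lock, then use a mark
 * (SDEB_XA_NOT_IN_USE here) as a cheap "free for re-use" flag. A minimal
 * standalone sketch of the allocation half (illustrative only, not driver
 * code; the names are made up):
 *
 *	static DEFINE_XARRAY_FLAGS(example_xa, XA_FLAGS_ALLOC);
 *
 *	static int example_add(void *entry)
 *	{
 *		u32 idx;
 *		int res = xa_alloc(&example_xa, &idx, entry,
 *				   XA_LIMIT(0, 65535), GFP_KERNEL);
 *
 *		return res < 0 ? res : (int)idx;
 *	}
 */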
7108 static int sdebug_add_host_helper(int per_host_idx)
7110 int k, devs_per_host, idx;
7111 int error = -ENOMEM;
7112 struct sdebug_host_info *sdbg_host;
7113 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7115 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7118 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7119 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7120 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7121 sdbg_host->si_idx = idx;
7123 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7125 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7126 for (k = 0; k < devs_per_host; k++) {
7127 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7132 spin_lock(&sdebug_host_list_lock);
7133 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7134 spin_unlock(&sdebug_host_list_lock);
7136 sdbg_host->dev.bus = &pseudo_lld_bus;
7137 sdbg_host->dev.parent = pseudo_primary;
7138 sdbg_host->dev.release = &sdebug_release_adapter;
7139 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7141 error = device_register(&sdbg_host->dev);
7143 spin_lock(&sdebug_host_list_lock);
7144 list_del(&sdbg_host->host_list);
7145 spin_unlock(&sdebug_host_list_lock);
7153 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7155 list_del(&sdbg_devinfo->dev_list);
7156 kfree(sdbg_devinfo->zstate);
7157 kfree(sdbg_devinfo);
7159 if (sdbg_host->dev.release)
7160 put_device(&sdbg_host->dev);
7163 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7167 static int sdebug_do_add_host(bool mk_new_store)
7169 int ph_idx = sdeb_most_recent_idx;
7172 ph_idx = sdebug_add_store();
7176 return sdebug_add_host_helper(ph_idx);
7179 static void sdebug_do_remove_host(bool the_end)
7182 struct sdebug_host_info *sdbg_host = NULL;
7183 struct sdebug_host_info *sdbg_host2;
7185 spin_lock(&sdebug_host_list_lock);
7186 if (!list_empty(&sdebug_host_list)) {
7187 sdbg_host = list_entry(sdebug_host_list.prev,
7188 struct sdebug_host_info, host_list);
7189 idx = sdbg_host->si_idx;
7191 if (!the_end && idx >= 0) {
7194 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7195 if (sdbg_host2 == sdbg_host)
7197 if (idx == sdbg_host2->si_idx) {
7203 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7204 if (idx == sdeb_most_recent_idx)
7205 --sdeb_most_recent_idx;
7209 list_del(&sdbg_host->host_list);
7210 spin_unlock(&sdebug_host_list_lock);
7215 device_unregister(&sdbg_host->dev);
7219 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7222 struct sdebug_dev_info *devip;
7224 block_unblock_all_queues(true);
7225 devip = (struct sdebug_dev_info *)sdev->hostdata;
7226 if (NULL == devip) {
7227 block_unblock_all_queues(false);
7230 num_in_q = atomic_read(&devip->num_in_q);
7232 if (qdepth > SDEBUG_CANQUEUE) {
7233 qdepth = SDEBUG_CANQUEUE;
7234 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7235 qdepth, SDEBUG_CANQUEUE);
7239 if (qdepth != sdev->queue_depth)
7240 scsi_change_queue_depth(sdev, qdepth);
7242 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7243 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7244 __func__, qdepth, num_in_q);
7246 block_unblock_all_queues(false);
7247 return sdev->queue_depth;
7250 static bool fake_timeout(struct scsi_cmnd *scp)
7252 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7253 if (sdebug_every_nth < -1)
7254 sdebug_every_nth = -1;
7255 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7256 return true; /* ignore command causing timeout */
7257 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7258 scsi_medium_access_command(scp))
7259 return true; /* time out reads and writes */
7264 /* Response to TUR or media access command when the device is stopped */
7265 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7269 ktime_t now_ts = ktime_get_boottime();
7270 struct scsi_device *sdp = scp->device;
7272 stopped_state = atomic_read(&devip->stopped);
7273 if (stopped_state == 2) {
7274 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7275 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7276 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7277 /* tur_ms_to_ready timer has expired */
7278 atomic_set(&devip->stopped, 0);
7282 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7284 sdev_printk(KERN_INFO, sdp,
7285 "%s: Not ready: in process of becoming ready\n", my_name);
7286 if (scp->cmnd[0] == TEST_UNIT_READY) {
7287 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7289 if (diff_ns <= tur_nanosecs_to_ready)
7290 diff_ns = tur_nanosecs_to_ready - diff_ns;
7292 diff_ns = tur_nanosecs_to_ready;
7293 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7294 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7295 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7297 return check_condition_result;
7300 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7302 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7304 return check_condition_result;
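/*
 * Illustrative way to exercise resp_not_ready() above: make newly created
 * devices report "becoming ready" for the first 2500 ms, with the remaining
 * delay (in milliseconds) placed in the sense-data information field per
 * the 20-061r2 reference above:
 *
 *     modprobe scsi_debug tur_ms_to_ready=2500
 */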
7307 static int sdebug_map_queues(struct Scsi_Host *shost)
7311 if (shost->nr_hw_queues == 1)
7314 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7315 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7319 if (i == HCTX_TYPE_DEFAULT)
7320 map->nr_queues = submit_queues - poll_queues;
7321 else if (i == HCTX_TYPE_POLL)
7322 map->nr_queues = poll_queues;
7324 if (!map->nr_queues) {
7325 BUG_ON(i == HCTX_TYPE_DEFAULT);
7329 map->queue_offset = qoff;
7330 blk_mq_map_queues(map);
7332 qoff += map->nr_queues;
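/*
 * Illustrative split of hardware queues: three regular submission queues
 * plus one poll queue. Polled completions are counted as mq_polls in the
 * statistics shown by the proc interface:
 *
 *     modprobe scsi_debug submit_queues=4 poll_queues=1
 */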
7339 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7342 bool retiring = false;
7343 int num_entries = 0;
7344 unsigned int qc_idx = 0;
7345 unsigned long iflags;
7346 ktime_t kt_from_boot = ktime_get_boottime();
7347 struct sdebug_queue *sqp;
7348 struct sdebug_queued_cmd *sqcp;
7349 struct scsi_cmnd *scp;
7350 struct sdebug_dev_info *devip;
7351 struct sdebug_defer *sd_dp;
7353 sqp = sdebug_q_arr + queue_num;
7354 spin_lock_irqsave(&sqp->qc_lock, iflags);
7356 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7358 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7361 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7363 if (unlikely(qc_idx >= sdebug_max_queue))
7366 sqcp = &sqp->qc_arr[qc_idx];
7367 sd_dp = sqcp->sd_dp;
7368 if (unlikely(!sd_dp))
7371 if (unlikely(scp == NULL)) {
7372 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7373 queue_num, qc_idx, __func__);
7376 if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7377 if (kt_from_boot < sd_dp->cmpl_ts)
7380 } else /* ignoring non REQ_HIPRI requests */
7382 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7384 atomic_dec(&devip->num_in_q);
7386 pr_err("devip=NULL from %s\n", __func__);
7387 if (unlikely(atomic_read(&retired_max_queue) > 0))
7390 sqcp->a_cmnd = NULL;
7391 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7392 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7393 sqp, queue_num, qc_idx, __func__);
7396 if (unlikely(retiring)) { /* user has reduced max_queue */
7399 retval = atomic_read(&retired_max_queue);
7400 if (qc_idx >= retval) {
7401 pr_err("index %d too large\n", retval);
7404 k = find_last_bit(sqp->in_use_bm, retval);
7405 if ((k < sdebug_max_queue) || (k == retval))
7406 atomic_set(&retired_max_queue, 0);
7408 atomic_set(&retired_max_queue, k + 1);
7410 sd_dp->defer_t = SDEB_DEFER_NONE;
7411 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7412 scp->scsi_done(scp); /* callback to mid level */
7413 spin_lock_irqsave(&sqp->qc_lock, iflags);
7416 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7417 if (num_entries > 0)
7418 atomic_add(num_entries, &sdeb_mq_poll_count);
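/*
 * One illustrative way to drive the poll path above from user space
 * (option names assumed from fio's documentation): issue polled direct
 * reads with RWF_HIPRI via fio's pvsync2 engine against a scsi_debug disk:
 *
 *     fio --name=poll --filename=/dev/sdX --direct=1 --ioengine=pvsync2 \
 *         --hipri --rw=randread --bs=4k --runtime=10 --time_based
 */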
7422 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7423 struct scsi_cmnd *scp)
7426 struct scsi_device *sdp = scp->device;
7427 const struct opcode_info_t *oip;
7428 const struct opcode_info_t *r_oip;
7429 struct sdebug_dev_info *devip;
7430 u8 *cmd = scp->cmnd;
7431 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7432 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7435 u64 lun_index = sdp->lun & 0x3FFF;
7442 scsi_set_resid(scp, 0);
7443 if (sdebug_statistics) {
7444 atomic_inc(&sdebug_cmnd_count);
7445 inject_now = inject_on_this_cmd();
7449 if (unlikely(sdebug_verbose &&
7450 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7455 sb = (int)sizeof(b);
7457 strcpy(b, "too long, over 32 bytes");
7459 for (k = 0, n = 0; k < len && n < sb; ++k)
7460 n += scnprintf(b + n, sb - n, "%02x ",
7463 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7464 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7466 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7467 return SCSI_MLQUEUE_HOST_BUSY;
7468 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7469 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7472 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7473 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7474 devip = (struct sdebug_dev_info *)sdp->hostdata;
7475 if (unlikely(!devip)) {
7476 devip = find_build_dev_info(sdp);
7480 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7481 atomic_set(&sdeb_inject_pending, 1);
7483 na = oip->num_attached;
7485 if (na) { /* multiple commands with this opcode */
7487 if (FF_SA & r_oip->flags) {
7488 if (F_SA_LOW & oip->flags)
7491 sa = get_unaligned_be16(cmd + 8);
7492 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7493 if (opcode == oip->opcode && sa == oip->sa)
7496 } else { /* since no service action only check opcode */
7497 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7498 if (opcode == oip->opcode)
7503 if (F_SA_LOW & r_oip->flags)
7504 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7505 else if (F_SA_HIGH & r_oip->flags)
7506 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7508 mk_sense_invalid_opcode(scp);
7511 } /* else (when na==0) we assume the oip is a match */
7513 if (unlikely(F_INV_OP & flags)) {
7514 mk_sense_invalid_opcode(scp);
7517 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7519 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7520 my_name, opcode, " supported for wlun");
7521 mk_sense_invalid_opcode(scp);
7524 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7528 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7529 rem = ~oip->len_mask[k] & cmd[k];
7531 for (j = 7; j >= 0; --j, rem <<= 1) {
7535 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7540 if (unlikely(!(F_SKIP_UA & flags) &&
7541 find_first_bit(devip->uas_bm,
7542 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7543 errsts = make_ua(scp, devip);
7547 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7548 atomic_read(&devip->stopped))) {
7549 errsts = resp_not_ready(scp, devip);
7553 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7555 if (unlikely(sdebug_every_nth)) {
7556 if (fake_timeout(scp))
7557 return 0; /* ignore command: make trouble */
7559 if (likely(oip->pfp))
7560 pfp = oip->pfp; /* calls a resp_* function */
7562 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7565 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7566 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7567 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7568 sdebug_ndelay > 10000)) {
7570 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7571 * for Start Stop Unit (SSU) want at least 1 second delay and
7572 * if sdebug_jdelay>1 want a long delay of that many seconds.
7573 * For Synchronize Cache want 1/20 of SSU's delay.
7575 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7576 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7578 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7579 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7581 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7584 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7586 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7589 static struct scsi_host_template sdebug_driver_template = {
7590 .show_info = scsi_debug_show_info,
7591 .write_info = scsi_debug_write_info,
7592 .proc_name = sdebug_proc_name,
7593 .name = "SCSI DEBUG",
7594 .info = scsi_debug_info,
7595 .slave_alloc = scsi_debug_slave_alloc,
7596 .slave_configure = scsi_debug_slave_configure,
7597 .slave_destroy = scsi_debug_slave_destroy,
7598 .ioctl = scsi_debug_ioctl,
7599 .queuecommand = scsi_debug_queuecommand,
7600 .change_queue_depth = sdebug_change_qdepth,
7601 .map_queues = sdebug_map_queues,
7602 .mq_poll = sdebug_blk_mq_poll,
7603 .eh_abort_handler = scsi_debug_abort,
7604 .eh_device_reset_handler = scsi_debug_device_reset,
7605 .eh_target_reset_handler = scsi_debug_target_reset,
7606 .eh_bus_reset_handler = scsi_debug_bus_reset,
7607 .eh_host_reset_handler = scsi_debug_host_reset,
7608 .can_queue = SDEBUG_CANQUEUE,
7610 .sg_tablesize = SG_MAX_SEGMENTS,
7611 .cmd_per_lun = DEF_CMD_PER_LUN,
7613 .max_segment_size = -1U,
7614 .module = THIS_MODULE,
7615 .track_queue_depth = 1,
7618 static int sdebug_driver_probe(struct device *dev)
7621 struct sdebug_host_info *sdbg_host;
7622 struct Scsi_Host *hpnt;
7625 sdbg_host = to_sdebug_host(dev);
7627 sdebug_driver_template.can_queue = sdebug_max_queue;
7628 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7629 if (!sdebug_clustering)
7630 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7632 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7634 pr_err("scsi_host_alloc failed\n");
7638 if (submit_queues > nr_cpu_ids) {
7639 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7640 my_name, submit_queues, nr_cpu_ids);
7641 submit_queues = nr_cpu_ids;
7644 * Decide whether to tell the scsi subsystem that we want mq. The
7645 * following should give the same answer for each host.
7647 hpnt->nr_hw_queues = submit_queues;
7648 if (sdebug_host_max_queue)
7649 hpnt->host_tagset = 1;
7651 /* poll queues are possible for nr_hw_queues > 1 */
7652 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7653 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7654 my_name, poll_queues, hpnt->nr_hw_queues);
7659 * Poll queues don't need interrupts, but we need at least one I/O queue
7660 * left over for non-polled I/O.
7661 * If that condition is not met, trim poll_queues to 1 (just for simplicity).
7663 if (poll_queues >= submit_queues) {
7664 if (submit_queues < 3)
7665 pr_warn("%s: trim poll_queues to 1\n", my_name);
7667 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7668 my_name, submit_queues - 1);
7674 sdbg_host->shost = hpnt;
7675 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7676 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7677 hpnt->max_id = sdebug_num_tgts + 1;
7679 hpnt->max_id = sdebug_num_tgts;
7680 /* = sdebug_max_luns; */
7681 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7685 switch (sdebug_dif) {
7687 case T10_PI_TYPE1_PROTECTION:
7688 hprot = SHOST_DIF_TYPE1_PROTECTION;
7690 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7693 case T10_PI_TYPE2_PROTECTION:
7694 hprot = SHOST_DIF_TYPE2_PROTECTION;
7696 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7699 case T10_PI_TYPE3_PROTECTION:
7700 hprot = SHOST_DIF_TYPE3_PROTECTION;
7702 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7707 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7711 scsi_host_set_prot(hpnt, hprot);
7713 if (have_dif_prot || sdebug_dix)
7714 pr_info("host protection%s%s%s%s%s%s%s\n",
7715 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7716 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7717 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7718 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7719 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7720 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7721 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7723 if (sdebug_guard == 1)
7724 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7726 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7728 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7729 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7730 if (sdebug_every_nth) /* need stats counters for every_nth */
7731 sdebug_statistics = true;
7732 error = scsi_add_host(hpnt, &sdbg_host->dev);
7734 pr_err("scsi_add_host failed\n");
7736 scsi_host_put(hpnt);
7738 scsi_scan_host(hpnt);
7744 static void sdebug_driver_remove(struct device *dev)
7746 struct sdebug_host_info *sdbg_host;
7747 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7749 sdbg_host = to_sdebug_host(dev);
7751 scsi_remove_host(sdbg_host->shost);
7753 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7755 list_del(&sdbg_devinfo->dev_list);
7756 kfree(sdbg_devinfo->zstate);
7757 kfree(sdbg_devinfo);
7760 scsi_host_put(sdbg_host->shost);
7763 static int pseudo_lld_bus_match(struct device *dev,
7764 struct device_driver *dev_driver)
7769 static struct bus_type pseudo_lld_bus = {
7771 .match = pseudo_lld_bus_match,
7772 .probe = sdebug_driver_probe,
7773 .remove = sdebug_driver_remove,
7774 .drv_groups = sdebug_drv_groups,