2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of SAS disks.
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
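 *
 * Illustrative example (not from the original header): assuming the other
 * module parameters follow the "num_tgts" pattern shown above (e.g.
 * "dev_size_mb" and "max_luns"), an invocation such as
 *     modprobe scsi_debug dev_size_mb=64 num_tgts=2 max_luns=2
 * would be expected to create one pseudo host with 2 targets of 2 LUNs
 * each, all sharing a single 64 MB ramdisk.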
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
94 #define DEF_DEV_SIZE_MB 8
98 #define DEF_EVERY_NTH 0
103 #define DEF_LBPWS10 0
105 #define DEF_LOWEST_ALIGNED 0
106 #define DEF_NO_LUN_0 0
107 #define DEF_NUM_PARTS 0
109 #define DEF_OPT_BLKS 64
110 #define DEF_PHYSBLK_EXP 0
112 #define DEF_REMOVABLE false
113 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
114 #define DEF_SECTOR_SIZE 512
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_UNMAP_GRANULARITY 1
117 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
118 #define DEF_UNMAP_MAX_DESC 256
119 #define DEF_VIRTUAL_GB 0
120 #define DEF_VPD_USE_HOSTNO 1
121 #define DEF_WRITESAME_LENGTH 0xFFFF
123 /* bit mask values for scsi_debug_opts */
124 #define SCSI_DEBUG_OPT_NOISE 1
125 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
126 #define SCSI_DEBUG_OPT_TIMEOUT 4
127 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
128 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
129 #define SCSI_DEBUG_OPT_DIF_ERR 32
130 #define SCSI_DEBUG_OPT_DIX_ERR 64
131 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
132 /* When "every_nth" > 0 then modulo "every_nth" commands:
133 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134 * - a RECOVERED_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136 * - a TRANSPORT_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
139 * When "every_nth" < 0 then after "- every_nth" commands:
140 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
141 * - a RECOVERED_ERROR is simulated on successful read and write
142 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
143 * - a TRANSPORT_ERROR is simulated on successful read and write
144 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
145 * This will continue until some other action occurs (e.g. the user
146 * writing a new value (other than -1 or 1) to every_nth via sysfs).
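 *
 * Illustrative example (assuming the module parameters are exposed as
 * "every_nth" and "opts", matching the variable names below):
 *     modprobe scsi_debug every_nth=100 opts=4
 * should make every 100th command vanish without a response
 * (SCSI_DEBUG_OPT_TIMEOUT), while opts=8 would instead report a
 * RECOVERED_ERROR on every 100th successful read or write.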
149 /* when SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error
150 * is simulated at this sector on read commands: */
151 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
152 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
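/* i.e. with the values above, reads touching any of the ten sectors
 * 0x1234..0x123d (4660..4669) are reported as unrecovered read errors */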
154 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
155 * or "peripheral device" addressing (value 0) */
156 #define SAM2_LUN_ADDRESS_METHOD 0
157 #define SAM2_WLUN_REPORT_LUNS 0xc101
159 /* Can queue up to this number of commands. Typically commands that
160 * have a non-zero delay are queued. */
161 #define SCSI_DEBUG_CANQUEUE 255
163 static int scsi_debug_add_host = DEF_NUM_HOST;
164 static int scsi_debug_ato = DEF_ATO;
165 static int scsi_debug_delay = DEF_DELAY;
166 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_dix = DEF_DIX;
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_every_nth = DEF_EVERY_NTH;
171 static int scsi_debug_fake_rw = DEF_FAKE_RW;
172 static unsigned int scsi_debug_guard = DEF_GUARD;
173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
174 static int scsi_debug_max_luns = DEF_MAX_LUNS;
175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
176 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
177 static int scsi_debug_no_uld = 0;
178 static int scsi_debug_num_parts = DEF_NUM_PARTS;
179 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
180 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
181 static int scsi_debug_opts = DEF_OPTS;
182 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
183 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
184 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
185 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
186 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
187 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
188 static unsigned int scsi_debug_lbpu = DEF_LBPU;
189 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
190 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
191 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
192 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
193 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
194 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
195 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
196 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197 static bool scsi_debug_removable = DEF_REMOVABLE;
199 static int scsi_debug_cmnd_count = 0;
201 #define DEV_READONLY(TGT) (0)
203 static unsigned int sdebug_store_sectors;
204 static sector_t sdebug_capacity; /* in sectors */
206 /* old BIOS disk geometry values; the kernel may get rid of these, but
207 some mode sense pages may still need them */
208 static int sdebug_heads; /* heads per disk */
209 static int sdebug_cylinders_per; /* cylinders per surface */
210 static int sdebug_sectors_per; /* sectors per cylinder */
212 #define SDEBUG_MAX_PARTS 4
214 #define SDEBUG_SENSE_LEN 32
216 #define SCSI_DEBUG_MAX_CMD_LEN 32
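/* Non-zero when any logical block provisioning variant is enabled:
 * UNMAP (lbpu), WRITE SAME(16) with the UNMAP bit (lbpws) or
 * WRITE SAME(10) with the UNMAP bit (lbpws10). */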
218 static unsigned int scsi_debug_lbp(void)
220 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
223 struct sdebug_dev_info {
224 struct list_head dev_list;
225 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
226 unsigned int channel;
229 struct sdebug_host_info *sdbg_host;
236 struct sdebug_host_info {
237 struct list_head host_list;
238 struct Scsi_Host *shost;
240 struct list_head dev_info_list;
243 #define to_sdebug_host(d) \
244 container_of(d, struct sdebug_host_info, dev)
246 static LIST_HEAD(sdebug_host_list);
247 static DEFINE_SPINLOCK(sdebug_host_list_lock);
249 typedef void (* done_funct_t) (struct scsi_cmnd *);
251 struct sdebug_queued_cmd {
253 struct timer_list cmnd_timer;
254 done_funct_t done_funct;
255 struct scsi_cmnd * a_cmnd;
258 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
260 static unsigned char * fake_storep; /* ramdisk storage */
261 static struct sd_dif_tuple *dif_storep; /* protection info */
262 static void *map_storep; /* provisioning map */
264 static unsigned long map_size;
265 static int num_aborts = 0;
266 static int num_dev_resets = 0;
267 static int num_bus_resets = 0;
268 static int num_host_resets = 0;
269 static int dix_writes;
270 static int dix_reads;
271 static int dif_errors;
273 static DEFINE_SPINLOCK(queued_arr_lock);
274 static DEFINE_RWLOCK(atomic_rw);
276 static char sdebug_proc_name[] = "scsi_debug";
278 static struct bus_type pseudo_lld_bus;
280 static struct device_driver sdebug_driverfs_driver = {
281 .name = sdebug_proc_name,
282 .bus = &pseudo_lld_bus,
285 static const int check_condition_result =
286 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
288 static const int illegal_condition_result =
289 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
291 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
293 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
296 static void *fake_store(unsigned long long lba)
298 lba = do_div(lba, sdebug_store_sectors);
300 return fake_storep + lba * scsi_debug_sector_size;
303 static struct sd_dif_tuple *dif_store(sector_t sector)
305 sector = do_div(sector, sdebug_store_sectors);
307 return dif_storep + sector;
310 static int sdebug_add_adapter(void);
311 static void sdebug_remove_adapter(void);
313 static void sdebug_max_tgts_luns(void)
315 struct sdebug_host_info *sdbg_host;
316 struct Scsi_Host *hpnt;
318 spin_lock(&sdebug_host_list_lock);
319 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
320 hpnt = sdbg_host->shost;
321 if ((hpnt->this_id >= 0) &&
322 (scsi_debug_num_tgts > hpnt->this_id))
323 hpnt->max_id = scsi_debug_num_tgts + 1;
325 hpnt->max_id = scsi_debug_num_tgts;
326 /* scsi_debug_max_luns; */
327 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
329 spin_unlock(&sdebug_host_list_lock);
332 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
335 unsigned char *sbuff;
337 sbuff = devip->sense_buff;
338 memset(sbuff, 0, SDEBUG_SENSE_LEN);
340 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
342 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
343 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
344 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
347 static void get_data_transfer_info(unsigned char *cmd,
348 unsigned long long *lba, unsigned int *num,
354 case VARIABLE_LENGTH_CMD:
355 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
356 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
357 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
358 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
360 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
361 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
363 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
370 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
371 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
372 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
373 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
375 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
380 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
383 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
390 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
393 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
397 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
398 (u32)(cmd[1] & 0x1f) << 16;
399 *num = (0 == cmd[4]) ? 256 : cmd[4];
406 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
408 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
409 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
412 /* return -ENOTTY; // correct return but upsets fdisk */
415 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
416 struct sdebug_dev_info * devip)
419 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
420 printk(KERN_INFO "scsi_debug: Reporting Unit "
421 "attention: power on reset\n");
423 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
424 return check_condition_result;
426 if ((0 == reset_only) && devip->stopped) {
427 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
428 printk(KERN_INFO "scsi_debug: Reporting Not "
429 "ready: initializing command required\n");
430 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
432 return check_condition_result;
437 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
438 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
442 struct scsi_data_buffer *sdb = scsi_in(scp);
446 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
447 return (DID_ERROR << 16);
449 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
451 sdb->resid = scsi_bufflen(scp) - act_len;
456 /* Returns number of bytes fetched into 'arr' or -1 if error. */
457 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
460 if (!scsi_bufflen(scp))
462 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
465 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
469 static const char * inq_vendor_id = "Linux ";
470 static const char * inq_product_id = "scsi_debug ";
471 static const char * inq_product_rev = "0004";
473 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
474 int target_dev_id, int dev_id_num,
475 const char * dev_id_str,
481 port_a = target_dev_id + 1;
482 /* T10 vendor identifier field format (faked) */
483 arr[0] = 0x2; /* ASCII */
486 memcpy(&arr[4], inq_vendor_id, 8);
487 memcpy(&arr[12], inq_product_id, 16);
488 memcpy(&arr[28], dev_id_str, dev_id_str_len);
489 num = 8 + 16 + dev_id_str_len;
492 if (dev_id_num >= 0) {
493 /* NAA-5, Logical unit identifier (binary) */
494 arr[num++] = 0x1; /* binary (not necessarily sas) */
495 arr[num++] = 0x3; /* PIV=0, lu, naa */
498 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
502 arr[num++] = (dev_id_num >> 24);
503 arr[num++] = (dev_id_num >> 16) & 0xff;
504 arr[num++] = (dev_id_num >> 8) & 0xff;
505 arr[num++] = dev_id_num & 0xff;
506 /* Target relative port number */
507 arr[num++] = 0x61; /* proto=sas, binary */
508 arr[num++] = 0x94; /* PIV=1, target port, rel port */
509 arr[num++] = 0x0; /* reserved */
510 arr[num++] = 0x4; /* length */
511 arr[num++] = 0x0; /* reserved */
512 arr[num++] = 0x0; /* reserved */
514 arr[num++] = 0x1; /* relative port A */
516 /* NAA-5, Target port identifier */
517 arr[num++] = 0x61; /* proto=sas, binary */
518 arr[num++] = 0x93; /* piv=1, target port, naa */
521 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
525 arr[num++] = (port_a >> 24);
526 arr[num++] = (port_a >> 16) & 0xff;
527 arr[num++] = (port_a >> 8) & 0xff;
528 arr[num++] = port_a & 0xff;
529 /* NAA-5, Target port group identifier */
530 arr[num++] = 0x61; /* proto=sas, binary */
531 arr[num++] = 0x95; /* piv=1, target port group id */
536 arr[num++] = (port_group_id >> 8) & 0xff;
537 arr[num++] = port_group_id & 0xff;
538 /* NAA-5, Target device identifier */
539 arr[num++] = 0x61; /* proto=sas, binary */
540 arr[num++] = 0xa3; /* piv=1, target device, naa */
543 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
547 arr[num++] = (target_dev_id >> 24);
548 arr[num++] = (target_dev_id >> 16) & 0xff;
549 arr[num++] = (target_dev_id >> 8) & 0xff;
550 arr[num++] = target_dev_id & 0xff;
551 /* SCSI name string: Target device identifier */
552 arr[num++] = 0x63; /* proto=sas, UTF-8 */
553 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
556 memcpy(arr + num, "naa.52222220", 12);
558 snprintf(b, sizeof(b), "%08X", target_dev_id);
559 memcpy(arr + num, b, 8);
561 memset(arr + num, 0, 4);
567 static unsigned char vpd84_data[] = {
568 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
569 0x22,0x22,0x22,0x0,0xbb,0x1,
570 0x22,0x22,0x22,0x0,0xbb,0x2,
573 static int inquiry_evpd_84(unsigned char * arr)
575 memcpy(arr, vpd84_data, sizeof(vpd84_data));
576 return sizeof(vpd84_data);
579 static int inquiry_evpd_85(unsigned char * arr)
582 const char * na1 = "https://www.kernel.org/config";
583 const char * na2 = "http://www.kernel.org/log";
586 arr[num++] = 0x1; /* lu, storage config */
587 arr[num++] = 0x0; /* reserved */
592 plen = ((plen / 4) + 1) * 4;
593 arr[num++] = plen; /* length, null terminated, padded */
594 memcpy(arr + num, na1, olen);
595 memset(arr + num + olen, 0, plen - olen);
598 arr[num++] = 0x4; /* lu, logging */
599 arr[num++] = 0x0; /* reserved */
604 plen = ((plen / 4) + 1) * 4;
605 arr[num++] = plen; /* length, null terminated, padded */
606 memcpy(arr + num, na2, olen);
607 memset(arr + num + olen, 0, plen - olen);
613 /* SCSI ports VPD page */
614 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
619 port_a = target_dev_id + 1;
621 arr[num++] = 0x0; /* reserved */
622 arr[num++] = 0x0; /* reserved */
624 arr[num++] = 0x1; /* relative port 1 (primary) */
625 memset(arr + num, 0, 6);
628 arr[num++] = 12; /* length tp descriptor */
629 /* naa-5 target port identifier (A) */
630 arr[num++] = 0x61; /* proto=sas, binary */
631 arr[num++] = 0x93; /* PIV=1, target port, NAA */
632 arr[num++] = 0x0; /* reserved */
633 arr[num++] = 0x8; /* length */
634 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
638 arr[num++] = (port_a >> 24);
639 arr[num++] = (port_a >> 16) & 0xff;
640 arr[num++] = (port_a >> 8) & 0xff;
641 arr[num++] = port_a & 0xff;
643 arr[num++] = 0x0; /* reserved */
644 arr[num++] = 0x0; /* reserved */
646 arr[num++] = 0x2; /* relative port 2 (secondary) */
647 memset(arr + num, 0, 6);
650 arr[num++] = 12; /* length tp descriptor */
651 /* naa-5 target port identifier (B) */
652 arr[num++] = 0x61; /* proto=sas, binary */
653 arr[num++] = 0x93; /* PIV=1, target port, NAA */
654 arr[num++] = 0x0; /* reserved */
655 arr[num++] = 0x8; /* length */
656 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
660 arr[num++] = (port_b >> 24);
661 arr[num++] = (port_b >> 16) & 0xff;
662 arr[num++] = (port_b >> 8) & 0xff;
663 arr[num++] = port_b & 0xff;
669 static unsigned char vpd89_data[] = {
670 /* from 4th byte */ 0,0,0,0,
671 'l','i','n','u','x',' ',' ',' ',
672 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
674 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
676 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
677 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
678 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
679 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
681 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
683 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
685 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
686 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
687 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
689 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
690 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
691 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
696 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
697 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
698 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
713 static int inquiry_evpd_89(unsigned char * arr)
715 memcpy(arr, vpd89_data, sizeof(vpd89_data));
716 return sizeof(vpd89_data);
720 /* Block limits VPD page (SBC-3) */
721 static unsigned char vpdb0_data[] = {
722 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
723 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
724 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
725 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
728 static int inquiry_evpd_b0(unsigned char * arr)
732 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
734 /* Optimal transfer length granularity */
735 gran = 1 << scsi_debug_physblk_exp;
736 arr[2] = (gran >> 8) & 0xff;
737 arr[3] = gran & 0xff;
739 /* Maximum Transfer Length */
740 if (sdebug_store_sectors > 0x400) {
741 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
742 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
743 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
744 arr[7] = sdebug_store_sectors & 0xff;
747 /* Optimal Transfer Length */
748 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
750 if (scsi_debug_lbpu) {
751 /* Maximum Unmap LBA Count */
752 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
754 /* Maximum Unmap Block Descriptor Count */
755 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
758 /* Unmap Granularity Alignment */
759 if (scsi_debug_unmap_alignment) {
760 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
761 arr[28] |= 0x80; /* UGAVALID */
764 /* Optimal Unmap Granularity */
765 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
767 /* Maximum WRITE SAME Length */
768 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
770 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
772 return sizeof(vpdb0_data);
775 /* Block device characteristics VPD page (SBC-3) */
776 static int inquiry_evpd_b1(unsigned char *arr)
778 memset(arr, 0, 0x3c);
780 arr[1] = 1; /* non rotating medium (e.g. solid state) */
782 arr[3] = 5; /* less than 1.8" */
787 /* Logical block provisioning VPD page (SBC-3) */
788 static int inquiry_evpd_b2(unsigned char *arr)
791 arr[0] = 0; /* threshold exponent */
796 if (scsi_debug_lbpws)
799 if (scsi_debug_lbpws10)
802 if (scsi_debug_lbprz)
808 #define SDEBUG_LONG_INQ_SZ 96
809 #define SDEBUG_MAX_INQ_ARR_SZ 584
811 static int resp_inquiry(struct scsi_cmnd * scp, int target,
812 struct sdebug_dev_info * devip)
814 unsigned char pq_pdt;
816 unsigned char *cmd = (unsigned char *)scp->cmnd;
817 int alloc_len, n, ret;
819 alloc_len = (cmd[3] << 8) + cmd[4];
820 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
822 return DID_REQUEUE << 16;
824 pq_pdt = 0x1e; /* present, wlun */
825 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
826 pq_pdt = 0x7f; /* not present, no device type */
828 pq_pdt = (scsi_debug_ptype & 0x1f);
830 if (0x2 & cmd[1]) { /* CMDDT bit set */
831 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
834 return check_condition_result;
835 } else if (0x1 & cmd[1]) { /* EVPD bit set */
836 int lu_id_num, port_group_id, target_dev_id, len;
838 int host_no = devip->sdbg_host->shost->host_no;
840 port_group_id = (((host_no + 1) & 0x7f) << 8) +
841 (devip->channel & 0x7f);
842 if (0 == scsi_debug_vpd_use_hostno)
844 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
845 (devip->target * 1000) + devip->lun);
846 target_dev_id = ((host_no + 1) * 2000) +
847 (devip->target * 1000) - 3;
848 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
849 if (0 == cmd[2]) { /* supported vital product data pages */
850 arr[1] = cmd[2]; /*sanity */
852 arr[n++] = 0x0; /* this page */
853 arr[n++] = 0x80; /* unit serial number */
854 arr[n++] = 0x83; /* device identification */
855 arr[n++] = 0x84; /* software interface ident. */
856 arr[n++] = 0x85; /* management network addresses */
857 arr[n++] = 0x86; /* extended inquiry */
858 arr[n++] = 0x87; /* mode page policy */
859 arr[n++] = 0x88; /* SCSI ports */
860 arr[n++] = 0x89; /* ATA information */
861 arr[n++] = 0xb0; /* Block limits (SBC) */
862 arr[n++] = 0xb1; /* Block characteristics (SBC) */
863 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
865 arr[3] = n - 4; /* number of supported VPD pages */
866 } else if (0x80 == cmd[2]) { /* unit serial number */
867 arr[1] = cmd[2]; /*sanity */
869 memcpy(&arr[4], lu_id_str, len);
870 } else if (0x83 == cmd[2]) { /* device identification */
871 arr[1] = cmd[2]; /*sanity */
872 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
873 target_dev_id, lu_id_num,
875 } else if (0x84 == cmd[2]) { /* Software interface ident. */
876 arr[1] = cmd[2]; /*sanity */
877 arr[3] = inquiry_evpd_84(&arr[4]);
878 } else if (0x85 == cmd[2]) { /* Management network addresses */
879 arr[1] = cmd[2]; /*sanity */
880 arr[3] = inquiry_evpd_85(&arr[4]);
881 } else if (0x86 == cmd[2]) { /* extended inquiry */
882 arr[1] = cmd[2]; /*sanity */
883 arr[3] = 0x3c; /* number of following entries */
884 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
885 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
886 else if (scsi_debug_dif)
887 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
889 arr[4] = 0x0; /* no protection stuff */
890 arr[5] = 0x7; /* head of q, ordered + simple q's */
891 } else if (0x87 == cmd[2]) { /* mode page policy */
892 arr[1] = cmd[2]; /*sanity */
893 arr[3] = 0x8; /* number of following entries */
894 arr[4] = 0x2; /* disconnect-reconnect mp */
895 arr[6] = 0x80; /* mlus, shared */
896 arr[8] = 0x18; /* protocol specific lu */
897 arr[10] = 0x82; /* mlus, per initiator port */
898 } else if (0x88 == cmd[2]) { /* SCSI Ports */
899 arr[1] = cmd[2]; /*sanity */
900 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
901 } else if (0x89 == cmd[2]) { /* ATA information */
902 arr[1] = cmd[2]; /*sanity */
903 n = inquiry_evpd_89(&arr[4]);
906 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
907 arr[1] = cmd[2]; /*sanity */
908 arr[3] = inquiry_evpd_b0(&arr[4]);
909 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
910 arr[1] = cmd[2]; /*sanity */
911 arr[3] = inquiry_evpd_b1(&arr[4]);
912 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
913 arr[1] = cmd[2]; /*sanity */
914 arr[3] = inquiry_evpd_b2(&arr[4]);
916 /* Illegal request, invalid field in cdb */
917 mk_sense_buffer(devip, ILLEGAL_REQUEST,
918 INVALID_FIELD_IN_CDB, 0);
920 return check_condition_result;
922 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
923 ret = fill_from_dev_buffer(scp, arr,
924 min(len, SDEBUG_MAX_INQ_ARR_SZ));
928 /* drops through here for a standard inquiry */
929 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
930 arr[2] = scsi_debug_scsi_level;
931 arr[3] = 2; /* response_data_format==2 */
932 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
933 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
934 if (0 == scsi_debug_vpd_use_hostno)
935 arr[5] = 0x10; /* claim: implicit TGPS */
936 arr[6] = 0x10; /* claim: MultiP */
937 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
938 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
939 memcpy(&arr[8], inq_vendor_id, 8);
940 memcpy(&arr[16], inq_product_id, 16);
941 memcpy(&arr[32], inq_product_rev, 4);
942 /* version descriptors (2 bytes each) follow */
943 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
944 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
946 if (scsi_debug_ptype == 0) {
947 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
948 } else if (scsi_debug_ptype == 1) {
949 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
951 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
952 ret = fill_from_dev_buffer(scp, arr,
953 min(alloc_len, SDEBUG_LONG_INQ_SZ));
958 static int resp_requests(struct scsi_cmnd * scp,
959 struct sdebug_dev_info * devip)
961 unsigned char * sbuff;
962 unsigned char *cmd = (unsigned char *)scp->cmnd;
963 unsigned char arr[SDEBUG_SENSE_LEN];
967 memset(arr, 0, sizeof(arr));
968 if (devip->reset == 1)
969 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
970 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
971 sbuff = devip->sense_buff;
972 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
975 arr[1] = 0x0; /* NO_SENSE in sense_key */
976 arr[2] = THRESHOLD_EXCEEDED;
977 arr[3] = 0xff; /* TEST set and MRIE==6 */
980 arr[2] = 0x0; /* NO_SENSE in sense_key */
981 arr[7] = 0xa; /* 18 byte sense buffer */
982 arr[12] = THRESHOLD_EXCEEDED;
983 arr[13] = 0xff; /* TEST set and MRIE==6 */
986 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
987 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
988 /* DESC bit set and sense_buff in fixed format */
989 memset(arr, 0, sizeof(arr));
991 arr[1] = sbuff[2]; /* sense key */
992 arr[2] = sbuff[12]; /* asc */
993 arr[3] = sbuff[13]; /* ascq */
997 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
998 return fill_from_dev_buffer(scp, arr, len);
1001 static int resp_start_stop(struct scsi_cmnd * scp,
1002 struct sdebug_dev_info * devip)
1004 unsigned char *cmd = (unsigned char *)scp->cmnd;
1005 int power_cond, errsts, start;
1007 if ((errsts = check_readiness(scp, 1, devip)))
1009 power_cond = (cmd[4] & 0xf0) >> 4;
1011 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1013 return check_condition_result;
1016 if (start == devip->stopped)
1017 devip->stopped = !start;
1021 static sector_t get_sdebug_capacity(void)
1023 if (scsi_debug_virtual_gb > 0)
1024 return (sector_t)scsi_debug_virtual_gb *
1025 (1073741824 / scsi_debug_sector_size);
1027 return sdebug_store_sectors;
1030 #define SDEBUG_READCAP_ARR_SZ 8
1031 static int resp_readcap(struct scsi_cmnd * scp,
1032 struct sdebug_dev_info * devip)
1034 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1038 if ((errsts = check_readiness(scp, 1, devip)))
1040 /* following just in case virtual_gb changed */
1041 sdebug_capacity = get_sdebug_capacity();
1042 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1043 if (sdebug_capacity < 0xffffffff) {
1044 capac = (unsigned int)sdebug_capacity - 1;
1045 arr[0] = (capac >> 24);
1046 arr[1] = (capac >> 16) & 0xff;
1047 arr[2] = (capac >> 8) & 0xff;
1048 arr[3] = capac & 0xff;
1055 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1056 arr[7] = scsi_debug_sector_size & 0xff;
1057 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1060 #define SDEBUG_READCAP16_ARR_SZ 32
1061 static int resp_readcap16(struct scsi_cmnd * scp,
1062 struct sdebug_dev_info * devip)
1064 unsigned char *cmd = (unsigned char *)scp->cmnd;
1065 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1066 unsigned long long capac;
1067 int errsts, k, alloc_len;
1069 if ((errsts = check_readiness(scp, 1, devip)))
1071 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1073 /* following just in case virtual_gb changed */
1074 sdebug_capacity = get_sdebug_capacity();
1075 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1076 capac = sdebug_capacity - 1;
1077 for (k = 0; k < 8; ++k, capac >>= 8)
1078 arr[7 - k] = capac & 0xff;
1079 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1080 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1081 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1082 arr[11] = scsi_debug_sector_size & 0xff;
1083 arr[13] = scsi_debug_physblk_exp & 0xf;
1084 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1086 if (scsi_debug_lbp()) {
1087 arr[14] |= 0x80; /* LBPME */
1088 if (scsi_debug_lbprz)
1089 arr[14] |= 0x40; /* LBPRZ */
1092 arr[15] = scsi_debug_lowest_aligned & 0xff;
1094 if (scsi_debug_dif) {
1095 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1096 arr[12] |= 1; /* PROT_EN */
1099 return fill_from_dev_buffer(scp, arr,
1100 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1103 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1105 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1106 struct sdebug_dev_info * devip)
1108 unsigned char *cmd = (unsigned char *)scp->cmnd;
1109 unsigned char * arr;
1110 int host_no = devip->sdbg_host->shost->host_no;
1111 int n, ret, alen, rlen;
1112 int port_group_a, port_group_b, port_a, port_b;
1114 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1117 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1119 return DID_REQUEUE << 16;
1121 * EVPD page 0x88 states we have two ports, one
1122 * real and a fake port with no device connected.
1123 * So we create two port groups with one port each
1124 * and set the group with port B to unavailable.
1126 port_a = 0x1; /* relative port A */
1127 port_b = 0x2; /* relative port B */
1128 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1129 (devip->channel & 0x7f);
1130 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1131 (devip->channel & 0x7f) + 0x80;
1134 * The asymmetric access state is cycled according to the host_id.
1137 if (0 == scsi_debug_vpd_use_hostno) {
1138 arr[n++] = host_no % 3; /* Asymm access state */
1139 arr[n++] = 0x0F; /* claim: all states are supported */
1141 arr[n++] = 0x0; /* Active/Optimized path */
1142 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1144 arr[n++] = (port_group_a >> 8) & 0xff;
1145 arr[n++] = port_group_a & 0xff;
1146 arr[n++] = 0; /* Reserved */
1147 arr[n++] = 0; /* Status code */
1148 arr[n++] = 0; /* Vendor unique */
1149 arr[n++] = 0x1; /* One port per group */
1150 arr[n++] = 0; /* Reserved */
1151 arr[n++] = 0; /* Reserved */
1152 arr[n++] = (port_a >> 8) & 0xff;
1153 arr[n++] = port_a & 0xff;
1154 arr[n++] = 3; /* Port unavailable */
1155 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1156 arr[n++] = (port_group_b >> 8) & 0xff;
1157 arr[n++] = port_group_b & 0xff;
1158 arr[n++] = 0; /* Reserved */
1159 arr[n++] = 0; /* Status code */
1160 arr[n++] = 0; /* Vendor unique */
1161 arr[n++] = 0x1; /* One port per group */
1162 arr[n++] = 0; /* Reserved */
1163 arr[n++] = 0; /* Reserved */
1164 arr[n++] = (port_b >> 8) & 0xff;
1165 arr[n++] = port_b & 0xff;
1168 arr[0] = (rlen >> 24) & 0xff;
1169 arr[1] = (rlen >> 16) & 0xff;
1170 arr[2] = (rlen >> 8) & 0xff;
1171 arr[3] = rlen & 0xff;
1174 * Return the smallest value of either
1175 * - The allocated length
1176 * - The constructed command length
1177 * - The maximum array size
1180 ret = fill_from_dev_buffer(scp, arr,
1181 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1186 /* <<Following mode page info copied from ST318451LW>> */
1188 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1189 { /* Read-Write Error Recovery page for mode_sense */
1190 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1193 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1195 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1196 return sizeof(err_recov_pg);
1199 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1200 { /* Disconnect-Reconnect page for mode_sense */
1201 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1202 0, 0, 0, 0, 0, 0, 0, 0};
1204 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1206 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1207 return sizeof(disconnect_pg);
1210 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1211 { /* Format device page for mode_sense */
1212 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1213 0, 0, 0, 0, 0, 0, 0, 0,
1214 0, 0, 0, 0, 0x40, 0, 0, 0};
1216 memcpy(p, format_pg, sizeof(format_pg));
1217 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1218 p[11] = sdebug_sectors_per & 0xff;
1219 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1220 p[13] = scsi_debug_sector_size & 0xff;
1221 if (scsi_debug_removable)
1222 p[20] |= 0x20; /* should agree with INQUIRY */
1224 memset(p + 2, 0, sizeof(format_pg) - 2);
1225 return sizeof(format_pg);
1228 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1229 { /* Caching page for mode_sense */
1230 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1231 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1233 memcpy(p, caching_pg, sizeof(caching_pg));
1235 memset(p + 2, 0, sizeof(caching_pg) - 2);
1236 return sizeof(caching_pg);
1239 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1240 { /* Control mode page for mode_sense */
1241 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1243 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1246 if (scsi_debug_dsense)
1247 ctrl_m_pg[2] |= 0x4;
1249 ctrl_m_pg[2] &= ~0x4;
1252 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1254 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1256 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1257 else if (2 == pcontrol)
1258 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1259 return sizeof(ctrl_m_pg);
1263 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1264 { /* Informational Exceptions control mode page for mode_sense */
1265 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1267 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1270 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1272 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1273 else if (2 == pcontrol)
1274 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1275 return sizeof(iec_m_pg);
1278 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1279 { /* SAS SSP mode page - short format for mode_sense */
1280 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1281 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1283 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1285 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1286 return sizeof(sas_sf_m_pg);
1290 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1292 { /* SAS phy control and discover mode page for mode_sense */
1293 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1294 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1295 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1296 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1297 0x2, 0, 0, 0, 0, 0, 0, 0,
1298 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1299 0, 0, 0, 0, 0, 0, 0, 0,
1300 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1301 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1302 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1303 0x3, 0, 0, 0, 0, 0, 0, 0,
1304 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1305 0, 0, 0, 0, 0, 0, 0, 0,
1309 port_a = target_dev_id + 1;
1310 port_b = port_a + 1;
1311 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1312 p[20] = (port_a >> 24);
1313 p[21] = (port_a >> 16) & 0xff;
1314 p[22] = (port_a >> 8) & 0xff;
1315 p[23] = port_a & 0xff;
1316 p[48 + 20] = (port_b >> 24);
1317 p[48 + 21] = (port_b >> 16) & 0xff;
1318 p[48 + 22] = (port_b >> 8) & 0xff;
1319 p[48 + 23] = port_b & 0xff;
1321 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1322 return sizeof(sas_pcd_m_pg);
1325 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1326 { /* SAS SSP shared protocol specific port mode subpage */
1327 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1328 0, 0, 0, 0, 0, 0, 0, 0,
1331 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1333 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1334 return sizeof(sas_sha_m_pg);
1337 #define SDEBUG_MAX_MSENSE_SZ 256
1339 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1340 struct sdebug_dev_info * devip)
1342 unsigned char dbd, llbaa;
1343 int pcontrol, pcode, subpcode, bd_len;
1344 unsigned char dev_spec;
1345 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1347 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1348 unsigned char *cmd = (unsigned char *)scp->cmnd;
1350 if ((errsts = check_readiness(scp, 1, devip)))
1352 dbd = !!(cmd[1] & 0x8);
1353 pcontrol = (cmd[2] & 0xc0) >> 6;
1354 pcode = cmd[2] & 0x3f;
1356 msense_6 = (MODE_SENSE == cmd[0]);
1357 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1358 if ((0 == scsi_debug_ptype) && (0 == dbd))
1359 bd_len = llbaa ? 16 : 8;
1362 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1363 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1364 if (0x3 == pcontrol) { /* Saving values not supported */
1365 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1367 return check_condition_result;
1369 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1370 (devip->target * 1000) - 3;
1371 /* set DPOFUA bit for disks */
1372 if (0 == scsi_debug_ptype)
1373 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1383 arr[4] = 0x1; /* set LONGLBA bit */
1384 arr[7] = bd_len; /* assume 255 or less */
1388 if ((bd_len > 0) && (!sdebug_capacity))
1389 sdebug_capacity = get_sdebug_capacity();
1392 if (sdebug_capacity > 0xfffffffe) {
1398 ap[0] = (sdebug_capacity >> 24) & 0xff;
1399 ap[1] = (sdebug_capacity >> 16) & 0xff;
1400 ap[2] = (sdebug_capacity >> 8) & 0xff;
1401 ap[3] = sdebug_capacity & 0xff;
1403 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1404 ap[7] = scsi_debug_sector_size & 0xff;
1407 } else if (16 == bd_len) {
1408 unsigned long long capac = sdebug_capacity;
1410 for (k = 0; k < 8; ++k, capac >>= 8)
1411 ap[7 - k] = capac & 0xff;
1412 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1413 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1414 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1415 ap[15] = scsi_debug_sector_size & 0xff;
1420 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1421 /* TODO: Control Extension page */
1422 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1424 return check_condition_result;
1427 case 0x1: /* Read-Write error recovery page, direct access */
1428 len = resp_err_recov_pg(ap, pcontrol, target);
1431 case 0x2: /* Disconnect-Reconnect page, all devices */
1432 len = resp_disconnect_pg(ap, pcontrol, target);
1435 case 0x3: /* Format device page, direct access */
1436 len = resp_format_pg(ap, pcontrol, target);
1439 case 0x8: /* Caching page, direct access */
1440 len = resp_caching_pg(ap, pcontrol, target);
1443 case 0xa: /* Control Mode page, all devices */
1444 len = resp_ctrl_m_pg(ap, pcontrol, target);
1447 case 0x19: /* if spc==1 then sas phy, control+discover */
1448 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1449 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1450 INVALID_FIELD_IN_CDB, 0);
1451 return check_condition_result;
1454 if ((0x0 == subpcode) || (0xff == subpcode))
1455 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1456 if ((0x1 == subpcode) || (0xff == subpcode))
1457 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1459 if ((0x2 == subpcode) || (0xff == subpcode))
1460 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1463 case 0x1c: /* Informational Exceptions Mode page, all devices */
1464 len = resp_iec_m_pg(ap, pcontrol, target);
1467 case 0x3f: /* Read all Mode pages */
1468 if ((0 == subpcode) || (0xff == subpcode)) {
1469 len = resp_err_recov_pg(ap, pcontrol, target);
1470 len += resp_disconnect_pg(ap + len, pcontrol, target);
1471 len += resp_format_pg(ap + len, pcontrol, target);
1472 len += resp_caching_pg(ap + len, pcontrol, target);
1473 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1474 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1475 if (0xff == subpcode) {
1476 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1477 target, target_dev_id);
1478 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1480 len += resp_iec_m_pg(ap + len, pcontrol, target);
1482 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1483 INVALID_FIELD_IN_CDB, 0);
1484 return check_condition_result;
1489 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1491 return check_condition_result;
1494 arr[0] = offset - 1;
1496 arr[0] = ((offset - 2) >> 8) & 0xff;
1497 arr[1] = (offset - 2) & 0xff;
1499 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1502 #define SDEBUG_MAX_MSELECT_SZ 512
1504 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1505 struct sdebug_dev_info * devip)
1507 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1508 int param_len, res, errsts, mpage;
1509 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1510 unsigned char *cmd = (unsigned char *)scp->cmnd;
1512 if ((errsts = check_readiness(scp, 1, devip)))
1514 memset(arr, 0, sizeof(arr));
1517 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1518 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1519 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1520 INVALID_FIELD_IN_CDB, 0);
1521 return check_condition_result;
1523 res = fetch_to_dev_buffer(scp, arr, param_len);
1525 return (DID_ERROR << 16);
1526 else if ((res < param_len) &&
1527 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1528 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1529 " IO sent=%d bytes\n", param_len, res);
1530 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1531 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1533 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1534 INVALID_FIELD_IN_PARAM_LIST, 0);
1535 return check_condition_result;
1537 off = bd_len + (mselect6 ? 4 : 8);
1538 mpage = arr[off] & 0x3f;
1539 ps = !!(arr[off] & 0x80);
1541 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1542 INVALID_FIELD_IN_PARAM_LIST, 0);
1543 return check_condition_result;
1545 spf = !!(arr[off] & 0x40);
1546 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1548 if ((pg_len + off) > param_len) {
1549 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1550 PARAMETER_LIST_LENGTH_ERR, 0);
1551 return check_condition_result;
1554 case 0xa: /* Control Mode page */
1555 if (ctrl_m_pg[1] == arr[off + 1]) {
1556 memcpy(ctrl_m_pg + 2, arr + off + 2,
1557 sizeof(ctrl_m_pg) - 2);
1558 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1562 case 0x1c: /* Informational Exceptions Mode page */
1563 if (iec_m_pg[1] == arr[off + 1]) {
1564 memcpy(iec_m_pg + 2, arr + off + 2,
1565 sizeof(iec_m_pg) - 2);
1572 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1573 INVALID_FIELD_IN_PARAM_LIST, 0);
1574 return check_condition_result;
1577 static int resp_temp_l_pg(unsigned char * arr)
1579 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1580 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1583 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1584 return sizeof(temp_l_pg);
1587 static int resp_ie_l_pg(unsigned char * arr)
1589 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1592 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1593 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1594 arr[4] = THRESHOLD_EXCEEDED;
1597 return sizeof(ie_l_pg);
1600 #define SDEBUG_MAX_LSENSE_SZ 512
1602 static int resp_log_sense(struct scsi_cmnd * scp,
1603 struct sdebug_dev_info * devip)
1605 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1606 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1607 unsigned char *cmd = (unsigned char *)scp->cmnd;
1609 if ((errsts = check_readiness(scp, 1, devip)))
1611 memset(arr, 0, sizeof(arr));
1615 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1616 INVALID_FIELD_IN_CDB, 0);
1617 return check_condition_result;
1619 pcontrol = (cmd[2] & 0xc0) >> 6;
1620 pcode = cmd[2] & 0x3f;
1621 subpcode = cmd[3] & 0xff;
1622 alloc_len = (cmd[7] << 8) + cmd[8];
1624 if (0 == subpcode) {
1626 case 0x0: /* Supported log pages log page */
1628 arr[n++] = 0x0; /* this page */
1629 arr[n++] = 0xd; /* Temperature */
1630 arr[n++] = 0x2f; /* Informational exceptions */
1633 case 0xd: /* Temperature log page */
1634 arr[3] = resp_temp_l_pg(arr + 4);
1636 case 0x2f: /* Informational exceptions log page */
1637 arr[3] = resp_ie_l_pg(arr + 4);
1640 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1641 INVALID_FIELD_IN_CDB, 0);
1642 return check_condition_result;
1644 } else if (0xff == subpcode) {
1648 case 0x0: /* Supported log pages and subpages log page */
1651 arr[n++] = 0x0; /* 0,0 page */
1653 arr[n++] = 0xff; /* this page */
1655 arr[n++] = 0x0; /* Temperature */
1657 arr[n++] = 0x0; /* Informational exceptions */
1660 case 0xd: /* Temperature subpages */
1663 arr[n++] = 0x0; /* Temperature */
1666 case 0x2f: /* Informational exceptions subpages */
1669 arr[n++] = 0x0; /* Informational exceptions */
1673 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1674 INVALID_FIELD_IN_CDB, 0);
1675 return check_condition_result;
1678 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1679 INVALID_FIELD_IN_CDB, 0);
1680 return check_condition_result;
1682 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1683 return fill_from_dev_buffer(scp, arr,
1684 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1687 static int check_device_access_params(struct sdebug_dev_info *devi,
1688 unsigned long long lba, unsigned int num)
1690 if (lba + num > sdebug_capacity) {
1691 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1692 return check_condition_result;
1694 /* transfer length excessive (tie in to block limits VPD page) */
1695 if (num > sdebug_store_sectors) {
1696 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1697 return check_condition_result;
1702 /* Returns number of bytes copied or -1 if error. */
1703 static int do_device_access(struct scsi_cmnd *scmd,
1704 struct sdebug_dev_info *devi,
1705 unsigned long long lba, unsigned int num, int write)
1708 unsigned long long block, rest = 0;
1709 struct scsi_data_buffer *sdb;
1710 enum dma_data_direction dir;
1711 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1715 sdb = scsi_out(scmd);
1716 dir = DMA_TO_DEVICE;
1717 func = sg_pcopy_to_buffer;
1719 sdb = scsi_in(scmd);
1720 dir = DMA_FROM_DEVICE;
1721 func = sg_pcopy_from_buffer;
1726 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1729 block = do_div(lba, sdebug_store_sectors);
1730 if (block + num > sdebug_store_sectors)
1731 rest = block + num - sdebug_store_sectors;
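	/* The shared ramdisk is used circularly: copy the first (num - rest)
	 * sectors starting at 'block', then wrap any remaining 'rest' sectors
	 * back to the start of fake_storep. */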
1733 ret = func(sdb->table.sgl, sdb->table.nents,
1734 fake_storep + (block * scsi_debug_sector_size),
1735 (num - rest) * scsi_debug_sector_size, 0);
1736 if (ret != (num - rest) * scsi_debug_sector_size)
1740 ret += func(sdb->table.sgl, sdb->table.nents,
1741 fake_storep, rest * scsi_debug_sector_size,
1742 (num - rest) * scsi_debug_sector_size);
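/* Guard tag for one sector of data: an IP checksum when scsi_debug_guard
 * is non-zero, otherwise the T10 CRC (crc_t10dif). */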
1748 static __be16 dif_compute_csum(const void *buf, int len)
1752 if (scsi_debug_guard)
1753 csum = (__force __be16)ip_compute_csum(buf, len);
1755 csum = cpu_to_be16(crc_t10dif(buf, len));
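/* Check one protection information tuple against a sector of data: the
 * guard tag is always verified; the reference tag is checked against the
 * low 32 bits of the LBA for Type 1 protection and against ei_lba for
 * Type 2. */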
1760 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1761 sector_t sector, u32 ei_lba)
1763 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1765 if (sdt->guard_tag != csum) {
1766 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1768 (unsigned long)sector,
1769 be16_to_cpu(sdt->guard_tag),
1773 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1774 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1775 pr_err("%s: REF check failed on sector %lu\n",
1776 __func__, (unsigned long)sector);
1779 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1780 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1781 pr_err("%s: REF check failed on sector %lu\n",
1782 __func__, (unsigned long)sector);
1789 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1790 unsigned int sectors, bool read)
1792 unsigned int i, resid;
1793 struct scatterlist *psgl;
1795 const void *dif_store_end = dif_storep + sdebug_store_sectors;
1797 /* Bytes of protection data to copy into sgl */
1798 resid = sectors * sizeof(*dif_storep);
1800 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1801 int len = min(psgl->length, resid);
1802 void *start = dif_store(sector);
1805 if (dif_store_end < start + len)
1806 rest = start + len - dif_store_end;
1808 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1811 memcpy(paddr, start, len - rest);
1813 memcpy(start, paddr, len - rest);
1817 memcpy(paddr + len - rest, dif_storep, rest);
1819 memcpy(dif_storep, paddr + len - rest, rest);
1822 sector += len / sizeof(*dif_storep);
1824 kunmap_atomic(paddr);
1828 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1829 unsigned int sectors, u32 ei_lba)
1832 struct sd_dif_tuple *sdt;
1835 for (i = 0; i < sectors; i++) {
1838 sector = start_sec + i;
1839 sdt = dif_store(sector);
1841 if (sdt->app_tag == cpu_to_be16(0xffff))
1844 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1853 dif_copy_prot(SCpnt, start_sec, sectors, true);
1859 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1860 unsigned int num, struct sdebug_dev_info *devip,
1863 unsigned long iflags;
1866 ret = check_device_access_params(devip, lba, num);
1870 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1871 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1872 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1873 /* claim unrecoverable read error */
1874 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1875 /* set info field and valid bit for fixed descriptor */
1876 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1877 devip->sense_buff[0] |= 0x80; /* Valid bit */
1878 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1879 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1880 devip->sense_buff[3] = (ret >> 24) & 0xff;
1881 devip->sense_buff[4] = (ret >> 16) & 0xff;
1882 devip->sense_buff[5] = (ret >> 8) & 0xff;
1883 devip->sense_buff[6] = ret & 0xff;
1885 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1886 return check_condition_result;
1890 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1891 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1894 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1895 return illegal_condition_result;
1899 read_lock_irqsave(&atomic_rw, iflags);
1900 ret = do_device_access(SCpnt, devip, lba, num, 0);
1901 read_unlock_irqrestore(&atomic_rw, iflags);
1903 return DID_ERROR << 16;
1905 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
1910 void dump_sector(unsigned char *buf, int len)
1914 printk(KERN_ERR ">>> Sector Dump <<<\n");
1916 for (i = 0 ; i < len ; i += 16) {
1917 printk(KERN_ERR "%04d: ", i);
1919 for (j = 0 ; j < 16 ; j++) {
1920 unsigned char c = buf[i+j];
1921 if (c >= 0x20 && c < 0x7e)
1922 printk(" %c ", buf[i+j]);
1924 printk("%02x ", buf[i+j]);
1931 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1932 unsigned int sectors, u32 ei_lba)
1935 struct sd_dif_tuple *sdt;
1936 struct scatterlist *dsgl;
1937 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1938 void *daddr, *paddr;
1939 sector_t sector = start_sec;
1942 BUG_ON(scsi_sg_count(SCpnt) == 0);
1943 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1947 /* For each data page */
1948 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1949 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1950 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1952 /* For each sector-sized chunk in data page */
1953 for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
1955 /* If we're at the end of the current
1956 * protection page advance to the next one
1958 if (ppage_offset >= psgl->length) {
1959 kunmap_atomic(paddr);
1960 psgl = sg_next(psgl);
1961 BUG_ON(psgl == NULL);
1962 paddr = kmap_atomic(sg_page(psgl))
1967 sdt = paddr + ppage_offset;
1969 ret = dif_verify(sdt, daddr + j, sector, ei_lba);
1971 dump_sector(daddr + j, scsi_debug_sector_size);
1977 ppage_offset += sizeof(struct sd_dif_tuple);
1980 kunmap_atomic(paddr);
1981 kunmap_atomic(daddr);
1984 dif_copy_prot(SCpnt, start_sec, sectors, false);
1991 kunmap_atomic(paddr);
1992 kunmap_atomic(daddr);
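/* Translate between LBAs and bit positions in the provisioning map. Each
 * bit covers scsi_debug_unmap_granularity blocks; a non-zero
 * unmap_alignment shifts the grid. Worked example (illustrative values):
 * with granularity=4 and alignment=1, index 0 covers only LBA 0, index 1
 * covers LBAs 1-4, index 2 covers LBAs 5-8, and so on. */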
1996 static unsigned long lba_to_map_index(sector_t lba)
1998 if (scsi_debug_unmap_alignment) {
1999 lba += scsi_debug_unmap_granularity -
2000 scsi_debug_unmap_alignment;
2002 do_div(lba, scsi_debug_unmap_granularity);
2007 static sector_t map_index_to_lba(unsigned long index)
2009 sector_t lba = index * scsi_debug_unmap_granularity;
2011 if (scsi_debug_unmap_alignment) {
2012 lba -= scsi_debug_unmap_granularity -
2013 scsi_debug_unmap_alignment;
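/* Report whether 'lba' is currently mapped and, via *num, how many of the
 * following blocks share that state (bounded by the end of the store). */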
2019 static unsigned int map_state(sector_t lba, unsigned int *num)
2022 unsigned int mapped;
2023 unsigned long index;
2026 index = lba_to_map_index(lba);
2027 mapped = test_bit(index, map_storep);
2030 next = find_next_zero_bit(map_storep, map_size, index);
2032 next = find_next_bit(map_storep, map_size, index);
2034 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
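/* Mark the granules backing [lba, lba + len) as mapped in the provisioning
 * bitmap. unmap_region() below clears only whole, fully covered granules
 * and, when lbprz is set, also zero-fills the corresponding ramdisk blocks
 * (protection tuples, if present, are reset to 0xff). */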
2040 static void map_region(sector_t lba, unsigned int len)
2042 sector_t end = lba + len;
2045 unsigned long index = lba_to_map_index(lba);
2047 if (index < map_size)
2048 set_bit(index, map_storep);
2050 lba = map_index_to_lba(index + 1);
2054 static void unmap_region(sector_t lba, unsigned int len)
2056 sector_t end = lba + len;
2059 unsigned long index = lba_to_map_index(lba);
2061 if (lba == map_index_to_lba(index) &&
2062 lba + scsi_debug_unmap_granularity <= end &&
2064 clear_bit(index, map_storep);
2065 if (scsi_debug_lbprz) {
2066 memset(fake_storep +
2067 lba * scsi_debug_sector_size, 0,
2068 scsi_debug_sector_size *
2069 scsi_debug_unmap_granularity);
2072 memset(dif_storep + lba, 0xff,
2073 sizeof(*dif_storep) *
2074 scsi_debug_unmap_granularity);
2077 lba = map_index_to_lba(index + 1);
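/*
 * Note: unmap_region() only clears map bits for granules that lie entirely
 * within [lba, lba + len).  When scsi_debug_lbprz is set the backing RAM of
 * a cleared granule is zeroed, so later reads of unmapped blocks return
 * zeros, and any DIF storage for it is filled with 0xff.
 */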
2081 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2082 unsigned int num, struct sdebug_dev_info *devip,
2085 unsigned long iflags;
2088 ret = check_device_access_params(devip, lba, num);
2093 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2094 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2097 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2098 return illegal_condition_result;
2102 write_lock_irqsave(&atomic_rw, iflags);
2103 ret = do_device_access(SCpnt, devip, lba, num, 1);
2104 if (scsi_debug_lbp())
2105 map_region(lba, num);
2106 write_unlock_irqrestore(&atomic_rw, iflags);
2108 return (DID_ERROR << 16);
2109 else if ((ret < (num * scsi_debug_sector_size)) &&
2110 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2111 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2112 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2117 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2118 unsigned int num, struct sdebug_dev_info *devip,
2119 u32 ei_lba, unsigned int unmap)
2121 unsigned long iflags;
2122 unsigned long long i;
2125 ret = check_device_access_params(devip, lba, num);
2129 if (num > scsi_debug_write_same_length) {
2130 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2132 return check_condition_result;
2135 write_lock_irqsave(&atomic_rw, iflags);
2137 if (unmap && scsi_debug_lbp()) {
2138 unmap_region(lba, num);
2142 /* Else fetch one logical block */
2143 ret = fetch_to_dev_buffer(scmd,
2144 fake_storep + (lba * scsi_debug_sector_size),
2145 scsi_debug_sector_size);
2148 write_unlock_irqrestore(&atomic_rw, iflags);
2149 return (DID_ERROR << 16);
2150 } else if ((ret < (num * scsi_debug_sector_size)) &&
2151 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2152 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2153 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2155 /* Copy first sector to remaining blocks */
2156 for (i = 1 ; i < num ; i++)
2157 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2158 fake_storep + (lba * scsi_debug_sector_size),
2159 scsi_debug_sector_size);
2161 if (scsi_debug_lbp())
2162 map_region(lba, num);
2164 write_unlock_irqrestore(&atomic_rw, iflags);
2169 struct unmap_block_desc {
2175 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2178 struct unmap_block_desc *desc;
2179 unsigned int i, payload_len, descriptors;
2182 ret = check_readiness(scmd, 1, devip);
2186 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2187 BUG_ON(scsi_bufflen(scmd) != payload_len);
2189 descriptors = (payload_len - 8) / 16;
2191 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2193 return check_condition_result;
2195 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2197 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2198 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2200 desc = (void *)&buf[8];
2202 for (i = 0 ; i < descriptors ; i++) {
2203 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2204 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2206 ret = check_device_access_params(devip, lba, num);
2210 unmap_region(lba, num);
2221 #define SDEBUG_GET_LBA_STATUS_LEN 32
2223 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2224 struct sdebug_dev_info * devip)
2226 unsigned long long lba;
2227 unsigned int alloc_len, mapped, num;
2228 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2231 ret = check_readiness(scmd, 1, devip);
2235 lba = get_unaligned_be64(&scmd->cmnd[2]);
2236 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2241 ret = check_device_access_params(devip, lba, 1);
2245 mapped = map_state(lba, &num);
2247 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2248 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2249 put_unaligned_be64(lba, &arr[8]); /* LBA */
2250 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2251 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2253 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
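/*
 * The parameter data built above holds a single LBA status descriptor:
 *     bytes 0..3   parameter data length (20)
 *     bytes 8..15  starting LBA of the descriptor
 *     bytes 16..19 number of blocks sharing the same provisioning status
 *     byte  20     provisioning status: 0 = mapped, 1 = deallocated
 */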
2256 #define SDEBUG_RLUN_ARR_SZ 256
2258 static int resp_report_luns(struct scsi_cmnd * scp,
2259 struct sdebug_dev_info * devip)
2261 unsigned int alloc_len;
2262 int lun_cnt, i, upper, num, n, wlun, lun;
2263 unsigned char *cmd = (unsigned char *)scp->cmnd;
2264 int select_report = (int)cmd[2];
2265 struct scsi_lun *one_lun;
2266 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2267 unsigned char * max_addr;
2269 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2270 if ((alloc_len < 4) || (select_report > 2)) {
2271 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2273 return check_condition_result;
2275 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2276 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2277 lun_cnt = scsi_debug_max_luns;
2278 if (1 == select_report)
2280 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2282 wlun = (select_report > 0) ? 1 : 0;
2283 num = lun_cnt + wlun;
2284 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2285 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2286 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2287 sizeof(struct scsi_lun)), num);
2292 one_lun = (struct scsi_lun *) &arr[8];
2293 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2294 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2295 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2297 upper = (lun >> 8) & 0x3f;
2299 one_lun[i].scsi_lun[0] =
2300 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2301 one_lun[i].scsi_lun[1] = lun & 0xff;
2304 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2305 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2308 alloc_len = (unsigned char *)(one_lun + i) - arr;
2309 return fill_from_dev_buffer(scp, arr,
2310 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
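/*
 * Example (illustrative): with the default max_luns=1, no_lun_0=0 and a
 * SELECT REPORT of 0, the response prepared above is 16 bytes: an 8 byte
 * header whose LUN LIST LENGTH field is 8, followed by a single 8 byte
 * entry for LUN 0.
 */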
2313 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2314 unsigned int num, struct sdebug_dev_info *devip)
2317 unsigned char *kaddr, *buf;
2318 unsigned int offset;
2319 struct scatterlist *sg;
2320 struct scsi_data_buffer *sdb = scsi_in(scp);
2322 /* Note: it would be better to avoid this temporary buffer. */
2323 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2327 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2330 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2331 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2335 for (j = 0; j < sg->length; j++)
2336 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2338 offset += sg->length;
2339 kunmap_atomic(kaddr);
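/*
 * The loop above XORs every byte of the bidi data-in buffer with the
 * corresponding byte of the payload copied into 'buf'.  In the
 * XDWRITEREAD_10 case of queuecommand below, resp_read() and resp_write()
 * run before this function, so the data handed back to the initiator is
 * presumably the XOR of the old and the newly written blocks, as
 * XDWRITEREAD(10) expects.
 */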
2348 /* When timer goes off this function is called. */
2349 static void timer_intr_handler(unsigned long indx)
2351 struct sdebug_queued_cmd * sqcp;
2352 unsigned long iflags;
2354 if (indx >= scsi_debug_max_queue) {
2355 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2359 spin_lock_irqsave(&queued_arr_lock, iflags);
2360 sqcp = &queued_arr[(int)indx];
2361 if (! sqcp->in_use) {
2362 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2364 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2368 if (sqcp->done_funct) {
2369 sqcp->a_cmnd->result = sqcp->scsi_result;
2370 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2372 sqcp->done_funct = NULL;
2373 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2377 static struct sdebug_dev_info *
2378 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2380 struct sdebug_dev_info *devip;
2382 devip = kzalloc(sizeof(*devip), flags);
2384 devip->sdbg_host = sdbg_host;
2385 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2390 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2392 struct sdebug_host_info * sdbg_host;
2393 struct sdebug_dev_info * open_devip = NULL;
2394 struct sdebug_dev_info * devip =
2395 (struct sdebug_dev_info *)sdev->hostdata;
2399 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2401 printk(KERN_ERR "Host info NULL\n");
2404 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2405 if ((devip->used) && (devip->channel == sdev->channel) &&
2406 (devip->target == sdev->id) &&
2407 (devip->lun == sdev->lun))
2410 if ((!devip->used) && (!open_devip))
2414 if (!open_devip) { /* try and make a new one */
2415 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2417 printk(KERN_ERR "%s: out of memory at line %d\n",
2418 __func__, __LINE__);
2423 open_devip->channel = sdev->channel;
2424 open_devip->target = sdev->id;
2425 open_devip->lun = sdev->lun;
2426 open_devip->sdbg_host = sdbg_host;
2427 open_devip->reset = 1;
2428 open_devip->used = 1;
2429 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2430 if (scsi_debug_dsense)
2431 open_devip->sense_buff[0] = 0x72;
2433 open_devip->sense_buff[0] = 0x70;
2434 open_devip->sense_buff[7] = 0xa;
2436 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2437 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2442 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2444 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2445 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2446 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2447 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2451 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2453 struct sdebug_dev_info *devip;
2455 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2456 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2457 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2458 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2459 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2460 devip = devInfoReg(sdp);
2462 return 1; /* no resources, will be marked offline */
2463 sdp->hostdata = devip;
2464 if (sdp->host->cmd_per_lun)
2465 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2466 sdp->host->cmd_per_lun);
2467 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2468 if (scsi_debug_no_uld)
2469 sdp->no_uld_attach = 1;
2473 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2475 struct sdebug_dev_info *devip =
2476 (struct sdebug_dev_info *)sdp->hostdata;
2478 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2479 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2480 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2482 /* make this slot available for re-use */
2484 sdp->hostdata = NULL;
2488 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2489 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2491 unsigned long iflags;
2493 struct sdebug_queued_cmd *sqcp;
2495 spin_lock_irqsave(&queued_arr_lock, iflags);
2496 for (k = 0; k < scsi_debug_max_queue; ++k) {
2497 sqcp = &queued_arr[k];
2498 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2499 del_timer_sync(&sqcp->cmnd_timer);
2501 sqcp->a_cmnd = NULL;
2505 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2506 return (k < scsi_debug_max_queue) ? 1 : 0;
2509 /* Deletes (stops) timers of all queued commands */
2510 static void stop_all_queued(void)
2512 unsigned long iflags;
2514 struct sdebug_queued_cmd *sqcp;
2516 spin_lock_irqsave(&queued_arr_lock, iflags);
2517 for (k = 0; k < scsi_debug_max_queue; ++k) {
2518 sqcp = &queued_arr[k];
2519 if (sqcp->in_use && sqcp->a_cmnd) {
2520 del_timer_sync(&sqcp->cmnd_timer);
2522 sqcp->a_cmnd = NULL;
2525 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2528 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2530 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2531 printk(KERN_INFO "scsi_debug: abort\n");
2533 stop_queued_cmnd(SCpnt);
2537 static int scsi_debug_biosparam(struct scsi_device *sdev,
2538 struct block_device * bdev, sector_t capacity, int *info)
2543 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2544 printk(KERN_INFO "scsi_debug: biosparam\n");
2545 buf = scsi_bios_ptable(bdev);
2547 res = scsi_partsize(buf, capacity,
2548 &info[2], &info[0], &info[1]);
2553 info[0] = sdebug_heads;
2554 info[1] = sdebug_sectors_per;
2555 info[2] = sdebug_cylinders_per;
2559 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2561 struct sdebug_dev_info * devip;
2563 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2564 printk(KERN_INFO "scsi_debug: device_reset\n");
2567 devip = devInfoReg(SCpnt->device);
2574 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2576 struct sdebug_host_info *sdbg_host;
2577 struct sdebug_dev_info * dev_info;
2578 struct scsi_device * sdp;
2579 struct Scsi_Host * hp;
2581 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2582 printk(KERN_INFO "scsi_debug: bus_reset\n");
2584 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2585 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2587 list_for_each_entry(dev_info,
2588 &sdbg_host->dev_info_list,
2590 dev_info->reset = 1;
2596 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2598 struct sdebug_host_info * sdbg_host;
2599 struct sdebug_dev_info * dev_info;
2601 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2602 printk(KERN_INFO "scsi_debug: host_reset\n");
2604 spin_lock(&sdebug_host_list_lock);
2605 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2606 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2608 dev_info->reset = 1;
2610 spin_unlock(&sdebug_host_list_lock);
2615 /* Initializes timers in queued array */
2616 static void __init init_all_queued(void)
2618 unsigned long iflags;
2620 struct sdebug_queued_cmd * sqcp;
2622 spin_lock_irqsave(&queued_arr_lock, iflags);
2623 for (k = 0; k < scsi_debug_max_queue; ++k) {
2624 sqcp = &queued_arr[k];
2625 init_timer(&sqcp->cmnd_timer);
2627 sqcp->a_cmnd = NULL;
2629 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2632 static void __init sdebug_build_parts(unsigned char *ramp,
2633 unsigned long store_size)
2635 struct partition * pp;
2636 int starts[SDEBUG_MAX_PARTS + 2];
2637 int sectors_per_part, num_sectors, k;
2638 int heads_by_sects, start_sec, end_sec;
2640 /* assume partition table already zeroed */
2641 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2643 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2644 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2645 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2646 "partitions to %d\n", SDEBUG_MAX_PARTS);
2648 num_sectors = (int)sdebug_store_sectors;
2649 sectors_per_part = (num_sectors - sdebug_sectors_per)
2650 / scsi_debug_num_parts;
2651 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2652 starts[0] = sdebug_sectors_per;
2653 for (k = 1; k < scsi_debug_num_parts; ++k)
2654 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2656 starts[scsi_debug_num_parts] = num_sectors;
2657 starts[scsi_debug_num_parts + 1] = 0;
2659 ramp[510] = 0x55; /* magic partition markings */
2661 pp = (struct partition *)(ramp + 0x1be);
2662 for (k = 0; starts[k + 1]; ++k, ++pp) {
2663 start_sec = starts[k];
2664 end_sec = starts[k + 1] - 1;
2667 pp->cyl = start_sec / heads_by_sects;
2668 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2669 / sdebug_sectors_per;
2670 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2672 pp->end_cyl = end_sec / heads_by_sects;
2673 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2674 / sdebug_sectors_per;
2675 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2677 pp->start_sect = cpu_to_le32(start_sec);
2678 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2679 pp->sys_ind = 0x83; /* plain Linux partition */
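/*
 * Example of the CHS arithmetic above, using illustrative values
 * sdebug_heads = 8 and sdebug_sectors_per = 32 (so heads_by_sects = 256):
 * a partition starting at linear sector 256 gets cyl = 256 / 256 = 1,
 * head = (256 - 1 * 256) / 32 = 0 and sector = (256 % 32) + 1 = 1.
 */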
2683 static int schedule_resp(struct scsi_cmnd * cmnd,
2684 struct sdebug_dev_info * devip,
2685 done_funct_t done, int scsi_result, int delta_jiff)
2687 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2689 struct scsi_device * sdp = cmnd->device;
2691 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2692 "non-zero result=0x%x\n", sdp->host->host_no,
2693 sdp->channel, sdp->id, sdp->lun, scsi_result);
2696 if (cmnd && devip) {
2697 /* simulate autosense by this driver */
2698 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2699 memcpy(cmnd->sense_buffer, devip->sense_buff,
2700 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2701 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2703 if (delta_jiff <= 0) {
2705 cmnd->result = scsi_result;
2710 unsigned long iflags;
2712 struct sdebug_queued_cmd * sqcp = NULL;
2714 spin_lock_irqsave(&queued_arr_lock, iflags);
2715 for (k = 0; k < scsi_debug_max_queue; ++k) {
2716 sqcp = &queued_arr[k];
2720 if (k >= scsi_debug_max_queue) {
2721 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2722 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2723 return 1; /* report busy to mid level */
2726 sqcp->a_cmnd = cmnd;
2727 sqcp->scsi_result = scsi_result;
2728 sqcp->done_funct = done;
2729 sqcp->cmnd_timer.function = timer_intr_handler;
2730 sqcp->cmnd_timer.data = k;
2731 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2732 add_timer(&sqcp->cmnd_timer);
2733 spin_unlock_irqrestore(&queued_arr_lock, iflags);
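/*
 * Summary of the delayed-response path set up here: the command, its result
 * and the mid-level done callback are stored in a free queued_arr[] slot and
 * the slot's timer is armed for delta_jiff jiffies; when the timer fires,
 * timer_intr_handler() completes the command by calling the saved done
 * callback with the saved result.
 */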
2739 /* Note: The following macros create attribute files in the
2740 /sys/module/scsi_debug/parameters directory. Unfortunately the driver
2741 is not notified when one of them changes, so it cannot trigger auxiliary actions
2742 as it can when the corresponding attribute in the
2743 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2745 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2746 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2747 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2748 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2749 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2750 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2751 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2752 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2753 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2754 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
2755 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2756 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2757 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2758 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2759 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2760 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2761 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2762 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2763 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2764 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2765 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2766 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2767 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2768 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2769 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2770 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2771 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2772 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2773 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2774 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2775 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2776 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2777 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2778 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2780 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2783 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2784 MODULE_DESCRIPTION("SCSI debug adapter driver");
2785 MODULE_LICENSE("GPL");
2786 MODULE_VERSION(SCSI_DEBUG_VERSION);
2788 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2789 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2790 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2791 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2792 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2793 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2794 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2795 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2796 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2797 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2798 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2799 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2800 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2801 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2802 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2803 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2804 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2805 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2806 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2807 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2808 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2809 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2810 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2811 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2812 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2813 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2814 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2815 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2816 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2817 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2818 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
2819 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2820 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2821 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2822 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
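/*
 * Example usage (illustrative): these parameters can be given at load time,
 * e.g.
 *     modprobe scsi_debug dev_size_mb=64 max_luns=4 num_parts=2
 * and the ones declared writable above can also be inspected or changed at
 * run time through /sys/module/scsi_debug/parameters/<name>.
 */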
2824 static char sdebug_info[256];
2826 static const char * scsi_debug_info(struct Scsi_Host * shp)
2828 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2829 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2830 scsi_debug_version_date, scsi_debug_dev_size_mb,
2835 /* scsi_debug_write_info and scsi_debug_show_info below
2836  * provide the driver's /proc/scsi support
2838 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
2842 int minLen = length > 15 ? 15 : length;
2844 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2846 memcpy(arr, buffer, minLen);
2848 if (1 != sscanf(arr, "%d", &opts))
2850 scsi_debug_opts = opts;
2851 if (scsi_debug_every_nth != 0)
2852 scsi_debug_cmnd_count = 0;
2856 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2858 seq_printf(m, "scsi_debug adapter driver, version "
2860 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2861 "every_nth=%d(curr:%d)\n"
2862 "delay=%d, max_luns=%d, scsi_level=%d\n"
2863 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2864 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2865 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2866 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2867 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2868 scsi_debug_cmnd_count, scsi_debug_delay,
2869 scsi_debug_max_luns, scsi_debug_scsi_level,
2870 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2871 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2872 num_host_resets, dix_reads, dix_writes, dif_errors);
2876 static ssize_t delay_show(struct device_driver *ddp, char *buf)
2878 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2881 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
2887 if (1 == sscanf(buf, "%10s", work)) {
2888 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2889 scsi_debug_delay = delay;
2895 static DRIVER_ATTR_RW(delay);
2897 static ssize_t opts_show(struct device_driver *ddp, char *buf)
2899 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2902 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
2908 if (1 == sscanf(buf, "%10s", work)) {
2909 if (0 == strnicmp(work,"0x", 2)) {
2910 if (1 == sscanf(&work[2], "%x", &opts))
2913 if (1 == sscanf(work, "%d", &opts))
2919 scsi_debug_opts = opts;
2920 scsi_debug_cmnd_count = 0;
2923 static DRIVER_ATTR_RW(opts);
2925 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
2927 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2929 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
2934 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2935 scsi_debug_ptype = n;
2940 static DRIVER_ATTR_RW(ptype);
2942 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
2944 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2946 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
2951 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2952 scsi_debug_dsense = n;
2957 static DRIVER_ATTR_RW(dsense);
2959 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
2961 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2963 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
2968 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2969 scsi_debug_fake_rw = n;
2974 static DRIVER_ATTR_RW(fake_rw);
2976 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
2978 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2980 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
2985 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2986 scsi_debug_no_lun_0 = n;
2991 static DRIVER_ATTR_RW(no_lun_0);
2993 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
2995 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2997 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3002 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3003 scsi_debug_num_tgts = n;
3004 sdebug_max_tgts_luns();
3009 static DRIVER_ATTR_RW(num_tgts);
3011 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3013 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3015 static DRIVER_ATTR_RO(dev_size_mb);
3017 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3019 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3021 static DRIVER_ATTR_RO(num_parts);
3023 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3025 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3027 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3032 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3033 scsi_debug_every_nth = nth;
3034 scsi_debug_cmnd_count = 0;
3039 static DRIVER_ATTR_RW(every_nth);
3041 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3043 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3045 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3050 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3051 scsi_debug_max_luns = n;
3052 sdebug_max_tgts_luns();
3057 static DRIVER_ATTR_RW(max_luns);
3059 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3061 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3063 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3068 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3069 (n <= SCSI_DEBUG_CANQUEUE)) {
3070 scsi_debug_max_queue = n;
3075 static DRIVER_ATTR_RW(max_queue);
3077 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3079 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3081 static DRIVER_ATTR_RO(no_uld);
3083 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3085 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3087 static DRIVER_ATTR_RO(scsi_level);
3089 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3091 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3093 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3098 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3099 scsi_debug_virtual_gb = n;
3101 sdebug_capacity = get_sdebug_capacity();
3107 static DRIVER_ATTR_RW(virtual_gb);
3109 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3111 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3114 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3119 if (sscanf(buf, "%d", &delta_hosts) != 1)
3121 if (delta_hosts > 0) {
3123 sdebug_add_adapter();
3124 } while (--delta_hosts);
3125 } else if (delta_hosts < 0) {
3127 sdebug_remove_adapter();
3128 } while (++delta_hosts);
3132 static DRIVER_ATTR_RW(add_host);
3134 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3136 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3138 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3143 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3144 scsi_debug_vpd_use_hostno = n;
3149 static DRIVER_ATTR_RW(vpd_use_hostno);
3151 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3153 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3155 static DRIVER_ATTR_RO(sector_size);
3157 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3159 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3161 static DRIVER_ATTR_RO(dix);
3163 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3165 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3167 static DRIVER_ATTR_RO(dif);
3169 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3171 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3173 static DRIVER_ATTR_RO(guard);
3175 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3177 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3179 static DRIVER_ATTR_RO(ato);
3181 static ssize_t map_show(struct device_driver *ddp, char *buf)
3185 if (!scsi_debug_lbp())
3186 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3187 sdebug_store_sectors);
3189 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3191 buf[count++] = '\n';
3196 static DRIVER_ATTR_RO(map);
3198 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3200 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3202 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3207 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3208 scsi_debug_removable = (n > 0);
3213 static DRIVER_ATTR_RW(removable);
3215 /* Note: The following array creates attribute files in the
3216 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3217 files (over those found in the /sys/module/scsi_debug/parameters
3218 directory) is that auxiliary actions can be triggered when an attribute
3219 is changed. For example see: add_host_store() above.
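/*
 * Example (illustrative): writing such an attribute, e.g.
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * runs add_host_store() above, which in turn calls sdebug_add_adapter()
 * to bring up one more simulated host.
 */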
3222 static struct attribute *sdebug_drv_attrs[] = {
3223 &driver_attr_delay.attr,
3224 &driver_attr_opts.attr,
3225 &driver_attr_ptype.attr,
3226 &driver_attr_dsense.attr,
3227 &driver_attr_fake_rw.attr,
3228 &driver_attr_no_lun_0.attr,
3229 &driver_attr_num_tgts.attr,
3230 &driver_attr_dev_size_mb.attr,
3231 &driver_attr_num_parts.attr,
3232 &driver_attr_every_nth.attr,
3233 &driver_attr_max_luns.attr,
3234 &driver_attr_max_queue.attr,
3235 &driver_attr_no_uld.attr,
3236 &driver_attr_scsi_level.attr,
3237 &driver_attr_virtual_gb.attr,
3238 &driver_attr_add_host.attr,
3239 &driver_attr_vpd_use_hostno.attr,
3240 &driver_attr_sector_size.attr,
3241 &driver_attr_dix.attr,
3242 &driver_attr_dif.attr,
3243 &driver_attr_guard.attr,
3244 &driver_attr_ato.attr,
3245 &driver_attr_map.attr,
3246 &driver_attr_removable.attr,
3249 ATTRIBUTE_GROUPS(sdebug_drv);
3251 struct device *pseudo_primary;
3253 static int __init scsi_debug_init(void)
3260 switch (scsi_debug_sector_size) {
3267 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3268 scsi_debug_sector_size);
3272 switch (scsi_debug_dif) {
3274 case SD_DIF_TYPE0_PROTECTION:
3275 case SD_DIF_TYPE1_PROTECTION:
3276 case SD_DIF_TYPE2_PROTECTION:
3277 case SD_DIF_TYPE3_PROTECTION:
3281 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3285 if (scsi_debug_guard > 1) {
3286 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3290 if (scsi_debug_ato > 1) {
3291 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3295 if (scsi_debug_physblk_exp > 15) {
3296 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3297 scsi_debug_physblk_exp);
3301 if (scsi_debug_lowest_aligned > 0x3fff) {
3302 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3303 scsi_debug_lowest_aligned);
3307 if (scsi_debug_dev_size_mb < 1)
3308 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3309 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3310 sdebug_store_sectors = sz / scsi_debug_sector_size;
3311 sdebug_capacity = get_sdebug_capacity();
3313 /* play around with geometry, don't waste too much on track 0 */
3315 sdebug_sectors_per = 32;
3316 if (scsi_debug_dev_size_mb >= 16)
3318 else if (scsi_debug_dev_size_mb >= 256)
3320 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3321 (sdebug_sectors_per * sdebug_heads);
3322 if (sdebug_cylinders_per >= 1024) {
3323 /* other LLDs do this; implies >= 1GB ram disk ... */
3325 sdebug_sectors_per = 63;
3326 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3327 (sdebug_sectors_per * sdebug_heads);
3330 fake_storep = vmalloc(sz);
3331 if (NULL == fake_storep) {
3332 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3335 memset(fake_storep, 0, sz);
3336 if (scsi_debug_num_parts > 0)
3337 sdebug_build_parts(fake_storep, sz);
3339 if (scsi_debug_dix) {
3342 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3343 dif_storep = vmalloc(dif_size);
3345 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3346 dif_size, dif_storep);
3348 if (dif_storep == NULL) {
3349 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3354 memset(dif_storep, 0xff, dif_size);
3357 /* Logical Block Provisioning */
3358 if (scsi_debug_lbp()) {
3359 scsi_debug_unmap_max_blocks =
3360 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3362 scsi_debug_unmap_max_desc =
3363 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3365 scsi_debug_unmap_granularity =
3366 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3368 if (scsi_debug_unmap_alignment &&
3369 scsi_debug_unmap_granularity <=
3370 scsi_debug_unmap_alignment) {
3372 "%s: ERR: unmap_granularity <= unmap_alignment\n",
3377 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3378 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3380 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3383 if (map_storep == NULL) {
3384 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3389 bitmap_zero(map_storep, map_size);
3391 /* Map first 1KB for partition table */
3392 if (scsi_debug_num_parts)
3396 pseudo_primary = root_device_register("pseudo_0");
3397 if (IS_ERR(pseudo_primary)) {
3398 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3399 ret = PTR_ERR(pseudo_primary);
3402 ret = bus_register(&pseudo_lld_bus);
3404 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3408 ret = driver_register(&sdebug_driverfs_driver);
3410 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3417 host_to_add = scsi_debug_add_host;
3418 scsi_debug_add_host = 0;
3420 for (k = 0; k < host_to_add; k++) {
3421 if (sdebug_add_adapter()) {
3422 printk(KERN_ERR "scsi_debug_init: "
3423 "sdebug_add_adapter failed k=%d\n", k);
3428 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3429 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3430 scsi_debug_add_host);
3435 bus_unregister(&pseudo_lld_bus);
3437 root_device_unregister(pseudo_primary);
3448 static void __exit scsi_debug_exit(void)
3450 int k = scsi_debug_add_host;
3454 sdebug_remove_adapter();
3455 driver_unregister(&sdebug_driverfs_driver);
3456 bus_unregister(&pseudo_lld_bus);
3457 root_device_unregister(pseudo_primary);
3465 device_initcall(scsi_debug_init);
3466 module_exit(scsi_debug_exit);
3468 static void sdebug_release_adapter(struct device * dev)
3470 struct sdebug_host_info *sdbg_host;
3472 sdbg_host = to_sdebug_host(dev);
3476 static int sdebug_add_adapter(void)
3478 int k, devs_per_host;
3480 struct sdebug_host_info *sdbg_host;
3481 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3483 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3484 if (NULL == sdbg_host) {
3485 printk(KERN_ERR "%s: out of memory at line %d\n",
3486 __func__, __LINE__);
3490 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3492 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3493 for (k = 0; k < devs_per_host; k++) {
3494 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3495 if (!sdbg_devinfo) {
3496 printk(KERN_ERR "%s: out of memory at line %d\n",
3497 __func__, __LINE__);
3503 spin_lock(&sdebug_host_list_lock);
3504 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3505 spin_unlock(&sdebug_host_list_lock);
3507 sdbg_host->dev.bus = &pseudo_lld_bus;
3508 sdbg_host->dev.parent = pseudo_primary;
3509 sdbg_host->dev.release = &sdebug_release_adapter;
3510 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3512 error = device_register(&sdbg_host->dev);
3517 ++scsi_debug_add_host;
3521 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3523 list_del(&sdbg_devinfo->dev_list);
3524 kfree(sdbg_devinfo);
3531 static void sdebug_remove_adapter(void)
3533 struct sdebug_host_info * sdbg_host = NULL;
3535 spin_lock(&sdebug_host_list_lock);
3536 if (!list_empty(&sdebug_host_list)) {
3537 sdbg_host = list_entry(sdebug_host_list.prev,
3538 struct sdebug_host_info, host_list);
3539 list_del(&sdbg_host->host_list);
3541 spin_unlock(&sdebug_host_list_lock);
3546 device_unregister(&sdbg_host->dev);
3547 --scsi_debug_add_host;
3551 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3553 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3556 unsigned long long lba;
3559 int target = SCpnt->device->id;
3560 struct sdebug_dev_info *devip = NULL;
3561 int inj_recovered = 0;
3562 int inj_transport = 0;
3565 int delay_override = 0;
3568 scsi_set_resid(SCpnt, 0);
3569 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3570 printk(KERN_INFO "scsi_debug: cmd ");
3571 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3572 printk("%02x ", (int)cmd[k]);
3576 if (target == SCpnt->device->host->hostt->this_id) {
3577 printk(KERN_INFO "scsi_debug: initiator's id used as "
3579 return schedule_resp(SCpnt, NULL, done,
3580 DID_NO_CONNECT << 16, 0);
3583 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3584 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3585 return schedule_resp(SCpnt, NULL, done,
3586 DID_NO_CONNECT << 16, 0);
3587 devip = devInfoReg(SCpnt->device);
3589 return schedule_resp(SCpnt, NULL, done,
3590 DID_NO_CONNECT << 16, 0);
3592 if ((scsi_debug_every_nth != 0) &&
3593 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3594 scsi_debug_cmnd_count = 0;
3595 if (scsi_debug_every_nth < -1)
3596 scsi_debug_every_nth = -1;
3597 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3598 return 0; /* ignore command causing timeout */
3599 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3600 scsi_medium_access_command(SCpnt))
3601 return 0; /* time out reads and writes */
3602 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3603 inj_recovered = 1; /* to reads and writes below */
3604 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3605 inj_transport = 1; /* to reads and writes below */
3606 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3607 inj_dif = 1; /* to reads and writes below */
3608 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3609 inj_dix = 1; /* to reads and writes below */
3616 case TEST_UNIT_READY:
3618 break; /* only allowable wlun commands */
3620 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3621 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3622 "not supported for wlun\n", *cmd);
3623 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3625 errsts = check_condition_result;
3626 return schedule_resp(SCpnt, devip, done, errsts,
3632 case INQUIRY: /* mandatory, ignore unit attention */
3634 errsts = resp_inquiry(SCpnt, target, devip);
3636 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3638 errsts = resp_requests(SCpnt, devip);
3640 case REZERO_UNIT: /* actually this is REWIND for SSC */
3642 errsts = resp_start_stop(SCpnt, devip);
3644 case ALLOW_MEDIUM_REMOVAL:
3645 errsts = check_readiness(SCpnt, 1, devip);
3648 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3649 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3650 cmd[4] ? "inhibited" : "enabled");
3652 case SEND_DIAGNOSTIC: /* mandatory */
3653 errsts = check_readiness(SCpnt, 1, devip);
3655 case TEST_UNIT_READY: /* mandatory */
3657 errsts = check_readiness(SCpnt, 0, devip);
3660 errsts = check_readiness(SCpnt, 1, devip);
3663 errsts = check_readiness(SCpnt, 1, devip);
3666 errsts = check_readiness(SCpnt, 1, devip);
3669 errsts = check_readiness(SCpnt, 1, devip);
3672 errsts = resp_readcap(SCpnt, devip);
3674 case SERVICE_ACTION_IN:
3675 if (cmd[1] == SAI_READ_CAPACITY_16)
3676 errsts = resp_readcap16(SCpnt, devip);
3677 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3679 if (scsi_debug_lbp() == 0) {
3680 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3681 INVALID_COMMAND_OPCODE, 0);
3682 errsts = check_condition_result;
3684 errsts = resp_get_lba_status(SCpnt, devip);
3686 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3688 errsts = check_condition_result;
3691 case MAINTENANCE_IN:
3692 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3693 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3695 errsts = check_condition_result;
3698 errsts = resp_report_tgtpgs(SCpnt, devip);
3703 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3704 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3706 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3707 INVALID_COMMAND_OPCODE, 0);
3708 errsts = check_condition_result;
3712 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3713 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3714 (cmd[1] & 0xe0) == 0)
3715 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3720 errsts = check_readiness(SCpnt, 0, devip);
3723 if (scsi_debug_fake_rw)
3725 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3726 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3727 if (inj_recovered && (0 == errsts)) {
3728 mk_sense_buffer(devip, RECOVERED_ERROR,
3729 THRESHOLD_EXCEEDED, 0);
3730 errsts = check_condition_result;
3731 } else if (inj_transport && (0 == errsts)) {
3732 mk_sense_buffer(devip, ABORTED_COMMAND,
3733 TRANSPORT_PROBLEM, ACK_NAK_TO);
3734 errsts = check_condition_result;
3735 } else if (inj_dif && (0 == errsts)) {
3736 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3737 errsts = illegal_condition_result;
3738 } else if (inj_dix && (0 == errsts)) {
3739 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3740 errsts = illegal_condition_result;
3743 case REPORT_LUNS: /* mandatory, ignore unit attention */
3745 errsts = resp_report_luns(SCpnt, devip);
3747 case VERIFY: /* 10 byte SBC-2 command */
3748 errsts = check_readiness(SCpnt, 0, devip);
3753 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3754 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3756 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3757 INVALID_COMMAND_OPCODE, 0);
3758 errsts = check_condition_result;
3762 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3763 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3764 (cmd[1] & 0xe0) == 0)
3765 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3770 errsts = check_readiness(SCpnt, 0, devip);
3773 if (scsi_debug_fake_rw)
3775 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3776 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3777 if (inj_recovered && (0 == errsts)) {
3778 mk_sense_buffer(devip, RECOVERED_ERROR,
3779 THRESHOLD_EXCEEDED, 0);
3780 errsts = check_condition_result;
3781 } else if (inj_dif && (0 == errsts)) {
3782 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3783 errsts = illegal_condition_result;
3784 } else if (inj_dix && (0 == errsts)) {
3785 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3786 errsts = illegal_condition_result;
3792 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3793 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3794 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3795 INVALID_FIELD_IN_CDB, 0);
3796 errsts = check_condition_result;
3802 errsts = check_readiness(SCpnt, 0, devip);
3805 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3806 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3809 errsts = check_readiness(SCpnt, 0, devip);
3813 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3814 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3815 INVALID_COMMAND_OPCODE, 0);
3816 errsts = check_condition_result;
3818 errsts = resp_unmap(SCpnt, devip);
3822 errsts = resp_mode_sense(SCpnt, target, devip);
3825 errsts = resp_mode_select(SCpnt, 1, devip);
3827 case MODE_SELECT_10:
3828 errsts = resp_mode_select(SCpnt, 0, devip);
3831 errsts = resp_log_sense(SCpnt, devip);
3833 case SYNCHRONIZE_CACHE:
3835 errsts = check_readiness(SCpnt, 0, devip);
3838 errsts = check_readiness(SCpnt, 1, devip);
3840 case XDWRITEREAD_10:
3841 if (!scsi_bidi_cmnd(SCpnt)) {
3842 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3843 INVALID_FIELD_IN_CDB, 0);
3844 errsts = check_condition_result;
3848 errsts = check_readiness(SCpnt, 0, devip);
3851 if (scsi_debug_fake_rw)
3853 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3854 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3857 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3860 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3862 case VARIABLE_LENGTH_CMD:
3863 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3865 if ((cmd[10] & 0xe0) == 0)
3867 "Unprotected RD/WR to DIF device\n");
3869 if (cmd[9] == READ_32) {
3870 BUG_ON(SCpnt->cmd_len < 32);
3874 if (cmd[9] == WRITE_32) {
3875 BUG_ON(SCpnt->cmd_len < 32);
3880 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3881 INVALID_FIELD_IN_CDB, 0);
3882 errsts = check_condition_result;
3886 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3887 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3888 "supported\n", *cmd);
3889 errsts = check_readiness(SCpnt, 1, devip);
3891 break; /* Unit attention takes precedence */
3892 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3893 errsts = check_condition_result;
3896 return schedule_resp(SCpnt, devip, done, errsts,
3897 (delay_override ? 0 : scsi_debug_delay));
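/*
 * DEF_SCSI_QCMD() below is assumed to generate the locking wrapper
 * scsi_debug_queuecommand() that takes the host lock and then calls
 * scsi_debug_queuecommand_lck() with the mid-level done callback.
 */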
3900 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3902 static struct scsi_host_template sdebug_driver_template = {
3903 .show_info = scsi_debug_show_info,
3904 .write_info = scsi_debug_write_info,
3905 .proc_name = sdebug_proc_name,
3906 .name = "SCSI DEBUG",
3907 .info = scsi_debug_info,
3908 .slave_alloc = scsi_debug_slave_alloc,
3909 .slave_configure = scsi_debug_slave_configure,
3910 .slave_destroy = scsi_debug_slave_destroy,
3911 .ioctl = scsi_debug_ioctl,
3912 .queuecommand = scsi_debug_queuecommand,
3913 .eh_abort_handler = scsi_debug_abort,
3914 .eh_bus_reset_handler = scsi_debug_bus_reset,
3915 .eh_device_reset_handler = scsi_debug_device_reset,
3916 .eh_host_reset_handler = scsi_debug_host_reset,
3917 .bios_param = scsi_debug_biosparam,
3918 .can_queue = SCSI_DEBUG_CANQUEUE,
3920 .sg_tablesize = 256,
3922 .max_sectors = 0xffff,
3923 .use_clustering = DISABLE_CLUSTERING,
3924 .module = THIS_MODULE,
3927 static int sdebug_driver_probe(struct device * dev)
3930 struct sdebug_host_info *sdbg_host;
3931 struct Scsi_Host *hpnt;
3934 sdbg_host = to_sdebug_host(dev);
3936 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3937 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3939 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3944 sdbg_host->shost = hpnt;
3945 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3946 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3947 hpnt->max_id = scsi_debug_num_tgts + 1;
3949 hpnt->max_id = scsi_debug_num_tgts;
3950 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
3954 switch (scsi_debug_dif) {
3956 case SD_DIF_TYPE1_PROTECTION:
3957 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3959 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3962 case SD_DIF_TYPE2_PROTECTION:
3963 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3965 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3968 case SD_DIF_TYPE3_PROTECTION:
3969 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3971 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3976 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3980 scsi_host_set_prot(hpnt, host_prot);
3982 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3983 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3984 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3985 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3986 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3987 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3988 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3989 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
3991 if (scsi_debug_guard == 1)
3992 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3994 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3996 error = scsi_add_host(hpnt, &sdbg_host->dev);
3998 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4000 scsi_host_put(hpnt);
4002 scsi_scan_host(hpnt);
4008 static int sdebug_driver_remove(struct device * dev)
4010 struct sdebug_host_info *sdbg_host;
4011 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4013 sdbg_host = to_sdebug_host(dev);
4016 printk(KERN_ERR "%s: Unable to locate host info\n",
4021 scsi_remove_host(sdbg_host->shost);
4023 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4025 list_del(&sdbg_devinfo->dev_list);
4026 kfree(sdbg_devinfo);
4029 scsi_host_put(sdbg_host->shost);
4033 static int pseudo_lld_bus_match(struct device *dev,
4034 struct device_driver *dev_driver)
4039 static struct bus_type pseudo_lld_bus = {
4041 .match = pseudo_lld_bus_match,
4042 .probe = sdebug_driver_probe,
4043 .remove = sdebug_driver_remove,
4044 .drv_groups = sdebug_drv_groups,