/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_iblock.h"
static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);
/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct iblock_hba *ib_host;

	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
	if (!ib_host) {
		pr_err("Unable to allocate memory for"
				" struct iblock_hba\n");
		return -ENOMEM;
	}

	ib_host->iblock_host_id = host_id;

	hba->hba_ptr = ib_host;

	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

	pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
		hba->hba_id, ib_host->iblock_host_id);

	return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
	struct iblock_hba *ib_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);

	kfree(ib_host);
	hba->hba_ptr = NULL;
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;
	struct iblock_hba *ib_host = hba->hba_ptr;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}
	ib_dev->ibd_host = ib_host;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	/*
	 * These settings need to be made tunable..
	 */
	ib_dev->ibd_bio_set = bioset_create(32, 64);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = queue_max_hw_sectors(q);
	limits->max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}
static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
	struct iblock_req *ib_req;

	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ib_req) {
		pr_err("Unable to allocate memory for struct iblock_req\n");
		return NULL;
	}

	atomic_set(&ib_req->ib_bio_cnt, 0);
	return &ib_req->ib_task;
}
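
/*
 * iblock_emulate_read_cap_with_block_size() below rescales the backing
 * device's last-LBA count whenever the exported SCSI block size differs
 * from the device's logical block size: e.g. a 4096-byte device exported
 * at 512 bytes reports eight times as many blocks (a left shift by 3).
 */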
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048: blocks_long <<= 1; break;
		case 1024: blocks_long <<= 2; break;
		case 512:  blocks_long <<= 3; break;
		default: break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096: blocks_long >>= 1; break;
		case 1024: blocks_long <<= 1; break;
		case 512:  blocks_long <<= 2; break;
		default: break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096: blocks_long >>= 2; break;
		case 2048: blocks_long >>= 1; break;
		case 512:  blocks_long <<= 1; break;
		default: break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096: blocks_long >>= 3; break;
		case 2048: blocks_long >>= 2; break;
		case 1024: blocks_long >>= 1; break;
		default: break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd)
		transport_complete_sync_cache(cmd, err == 0);
	bio_put(bio);
}
/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
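	/*
	 * A zero-segment bio carries no data; submitted with WRITE_FLUSH it
	 * only asks the backing device to flush its volatile write cache.
	 */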
	submit_bio(WRITE_FLUSH, bio);
}
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
	return 1;
}

static int iblock_emulated_dpo(struct se_device *dev)
{
	return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
	return 1;
}

static int iblock_emulated_fua_read(struct se_device *dev)
{
	return 0;
}
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
	kfree(IBLOCK_REQ(task));
}
enum {
	Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
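
/*
 * Illustrative configfs usage from userspace (the exact paths depend on
 * how the local target configuration was created):
 *
 *   echo "udev_path=/dev/sdb" > $CONFIGFS/core/iblock_0/mydev/control
 *   echo 1 > $CONFIGFS/core/iblock_0/mydev/enable
 */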
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}
static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}
static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}
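
/*
 * Bios below are allocated from the device's private bioset (created in
 * iblock_create_virtdevice()) rather than the global fs_bio_set, so
 * allocation can make forward progress under memory pressure independent
 * of other bioset users.
 */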
static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	atomic_inc(&ib_req->ib_bio_cnt);

	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}
static int iblock_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 i, sg_num = task->task_sg_nents;
	sector_t block_lba;
	struct blk_plug plug;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     task->task_se_cmd->t_tasks_fua))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = task->task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
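	/*
	 * E.g. with a 4096-byte exported block size, SCSI LBA 100 becomes
	 * 512-byte sector 800 (100 << 3) for the block layer.
	 */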
	bio = iblock_get_bio(task, block_lba, sg_num);
	if (!bio)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			bio = iblock_get_bio(task, block_lba, sg_num);
			if (!bio)
				goto fail;
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;

fail:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
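
/*
 * Per-bio completion for iblock_bio_done() below: each bio decrements
 * ib_bio_cnt on completion; only the final completion reports the task
 * back to the core, successful only if ib_bio_err_cnt stayed zero.
 */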
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);

	/*
	 * Set -EIO if !BIO_UPTODATE and the caller passed err == 0.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
		return;

	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		 task, bio, task->task_lba,
		 (unsigned long long)bio->bi_sector, err);

	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.dpo_emulated		= iblock_emulated_dpo,
	.fua_write_emulated	= iblock_emulated_fua_write,
	.fua_read_emulated	= iblock_emulated_fua_read,
	.write_cache_emulated	= iblock_emulated_write_cache,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};
static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);