target: make iblock_emulate_sync_cache asynchronous
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_iblock.h"
static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*      iblock_attach_hba(): (Part of se_subsystem_api_t template)
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct iblock_hba *ib_host;

        ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
        if (!ib_host) {
                pr_err("Unable to allocate memory for"
                                " struct iblock_hba\n");
                return -ENOMEM;
        }

        ib_host->iblock_host_id = host_id;

        hba->hba_ptr = ib_host;

        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

        pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
                hba->hba_id, ib_host->iblock_host_id);

        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
        struct iblock_hba *ib_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
                " Target Core\n", hba->hba_id, ib_host->iblock_host_id);

        kfree(ib_host);
        hba->hba_ptr = NULL;
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;
        struct iblock_hba *ib_host = hba->hba_ptr;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }
        ib_dev->ibd_host = ib_host;

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return ib_dev;
}

static struct se_device *iblock_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p)
{
        struct iblock_dev *ib_dev = p;
        struct se_device *dev;
        struct se_dev_limits dev_limits;
        struct block_device *bd = NULL;
        struct request_queue *q;
        struct queue_limits *limits;
        u32 dev_flags = 0;
        int ret = -EINVAL;

        if (!ib_dev) {
                pr_err("Unable to locate struct iblock_dev parameter\n");
                return ERR_PTR(ret);
        }
        memset(&dev_limits, 0, sizeof(struct se_dev_limits));
        /*
         * These settings need to be made tunable..
         */
        ib_dev->ibd_bio_set = bioset_create(32, 64);
        if (!ib_dev->ibd_bio_set) {
                pr_err("IBLOCK: Unable to create bioset()\n");
                return ERR_PTR(-ENOMEM);
        }
        pr_debug("IBLOCK: Created bio_set()\n");
        /*
         * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
         * has already been set before echo 1 > $HBA/$DEV/enable can run.
         */
        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto failed;
        }
        /*
         * Set up the local scope queue_limits from struct request_queue->limits
         * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
         */
        q = bdev_get_queue(bd);
        limits = &dev_limits.limits;
        limits->logical_block_size = bdev_logical_block_size(bd);
        limits->max_hw_sectors = queue_max_hw_sectors(q);
        limits->max_sectors = queue_max_sectors(q);
        dev_limits.hw_queue_depth = q->nr_requests;
        dev_limits.queue_depth = q->nr_requests;

        ib_dev->ibd_bd = bd;

        dev = transport_add_device_to_core_hba(hba,
                        &iblock_template, se_dev, dev_flags, ib_dev,
                        &dev_limits, "IBLOCK", IBLOCK_VERSION);
        if (!dev)
                goto failed;

        /*
         * Check if the underlying struct block_device request_queue supports
         * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
         * in ATA, in which case we need to report TPE=1.
         */
        if (blk_queue_discard(q)) {
                dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
                                q->limits.max_discard_sectors;
                /*
                 * Currently hardcoded to 1 in Linux/SCSI code..
                 */
                dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity =
                                q->limits.discard_granularity;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;

                pr_debug("IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }

        if (blk_queue_nonrot(q))
                dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

        return dev;

failed:
        if (ib_dev->ibd_bio_set) {
                bioset_free(ib_dev->ibd_bio_set);
                ib_dev->ibd_bio_set = NULL;
        }
        ib_dev->ibd_bd = NULL;
        return ERR_PTR(ret);
}
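
/*
 * Note that blkdev_get_by_path() above claims the backing device with
 * FMODE_EXCL and ib_dev as the holder, so nothing else (e.g. a filesystem
 * mount) can claim it until iblock_free_device() drops the reference via
 * blkdev_put().
 */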

static void iblock_free_device(void *p)
{
        struct iblock_dev *ib_dev = p;

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
        kfree(ib_dev);
}

static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
        return container_of(task, struct iblock_req, ib_task);
}

static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
        struct iblock_req *ib_req;

        ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ib_req) {
                pr_err("Unable to allocate memory for struct iblock_req\n");
                return NULL;
        }

        atomic_set(&ib_req->ib_bio_cnt, 0);
        return &ib_req->ib_task;
}
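
/*
 * struct iblock_req embeds the struct se_task handed back to the core, so
 * IBLOCK_REQ() can recover the containing request with container_of().
 * ib_bio_cnt counts the bios in flight for the task; the matching decrement
 * in iblock_bio_done() decides when the task as a whole is complete.
 */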

static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                        break;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}
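
/*
 * The nested switch above is just a shift by the log2 ratio of the two
 * block sizes: exporting a 4096-byte device at a 512-byte SCSI block size
 * multiplies the block count by 8 (<< 3), and the inverse direction divides
 * it (>> 3). Only power-of-two sizes from 512 to 4096 are handled; any
 * other combination leaves the count unconverted.
 */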

static void iblock_end_io_flush(struct bio *bio, int err)
{
        struct se_cmd *cmd = bio->bi_private;

        if (err)
                pr_err("IBLOCK: cache flush failed: %d\n", err);

        if (cmd)
                transport_complete_sync_cache(cmd, err == 0);
        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                transport_complete_sync_cache(cmd, 1);

        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio->bi_bdev = ib_dev->ibd_bd;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(WRITE_FLUSH, bio);
}
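
/*
 * The flush is issued asynchronously: an empty bio is submitted with
 * WRITE_FLUSH and iblock_emulate_sync_cache() returns without waiting for
 * it to reach the media. When the Immediate bit is clear, bi_private
 * carries the se_cmd so iblock_end_io_flush() can report status once the
 * flush actually completes; with Immediate set, GOOD status was already
 * queued above and the completion handler only drops its bio reference.
 */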

/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
        return 1;
}

static int iblock_emulated_dpo(struct se_device *dev)
{
        return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
        return 1;
}

static int iblock_emulated_fua_read(struct se_device *dev)
{
        return 0;
}

static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        int barrier = 0;

        return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}

static void iblock_free_task(struct se_task *task)
{
        kfree(IBLOCK_REQ(task));
}

enum {
        Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
                                               struct se_subsystem_dev *se_dev,
                                               const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
                                        "%s", arg_p);
                        kfree(arg_p);
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}
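
/*
 * Sketch of typical configfs usage from userspace (the mount point is the
 * conventional /sys/kernel/config; the HBA and device names are examples):
 *
 *   echo udev_path=/dev/sdb > \
 *       /sys/kernel/config/target/core/iblock_0/mydev/control
 *   echo 1 > /sys/kernel/config/target/core/iblock_0/mydev/enable
 *
 * The udev_path= token must be set first; iblock_check_configfs_dev_params()
 * below rejects an enable attempt without it.
 */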

static ssize_t iblock_check_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

        if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameter for IBLOCK\n");
                return -EINVAL;
        }

        return 0;
}

static ssize_t iblock_show_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        char *b)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
        struct block_device *bd = ibd->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
                bl += sprintf(b + bl, "  UDEV PATH: %s\n",
                                ibd->ibd_udev_path);
        } else
                bl += sprintf(b + bl, "\n");

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
                        "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}

static void iblock_bio_destructor(struct bio *bio)
{
        struct se_task *task = bio->bi_private;
        struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;

        bio_free(bio, ib_dev->ibd_bio_set);
}

static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
        struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
        struct iblock_req *ib_req = IBLOCK_REQ(task);
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
                " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
        pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = task;
        bio->bi_destructor = iblock_bio_destructor;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_sector = lba;
        atomic_inc(&ib_req->ib_bio_cnt);

        pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
        pr_debug("Set ib_req->ib_bio_cnt: %d\n",
                        atomic_read(&ib_req->ib_bio_cnt));
        return bio;
}
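
/*
 * Bios are allocated from the per-device bioset with GFP_NOIO so that an
 * allocation under memory pressure cannot recurse back into the block
 * layer, and the mempool behind the bioset guarantees forward progress.
 * Each allocation bumps ib_bio_cnt before the bio is ever submitted, which
 * keeps the completion count in iblock_bio_done() from reaching zero early.
 */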

static int iblock_do_task(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 i, sg_num = task->task_sg_nents;
        sector_t block_lba;
        struct blk_plug plug;
        int rw;

        if (task->task_data_direction == DMA_TO_DEVICE) {
                /*
                 * Force data to disk if we pretend to not have a volatile
                 * write cache, or the initiator set the Force Unit Access bit.
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
                     task->task_se_cmd->t_tasks_fua))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
        } else {
                rw = READ;
        }

        /*
         * Convert the starting LBA from the struct se_task SCSI blocksize
         * into the 512-byte units that Linux/Block BIOs use.
         */
        if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
                block_lba = (task->task_lba << 3);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
                block_lba = (task->task_lba << 2);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
                block_lba = (task->task_lba << 1);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
                block_lba = task->task_lba;
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
                return PYX_TRANSPORT_LU_COMM_FAILURE;
        }

        bio = iblock_get_bio(task, block_lba, sg_num);
        if (!bio)
                return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entry this will cause an
                 *      endless loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        bio = iblock_get_bio(task, block_lba, sg_num);
                        if (!bio)
                                goto fail;
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
                sg_num--;
        }

        blk_start_plug(&plug);
        while ((bio = bio_list_pop(&list)))
                submit_bio(rw, bio);
        blk_finish_plug(&plug);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;

fail:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
        return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
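
/*
 * All bios for the task are built first and then submitted inside a single
 * blk_start_plug()/blk_finish_plug() section, letting the block layer merge
 * adjacent requests before they reach the driver. If bio_add_page() refuses
 * a segment (bio full, or a device limit hit), a fresh bio is started at
 * the current block_lba and chained onto the list.
 */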

static u32 iblock_get_device_rev(struct se_device *dev)
{
        return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
        return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
        struct se_task *task = bio->bi_private;
        struct iblock_req *ibr = IBLOCK_REQ(task);

        /*
         * Set -EIO if !BIO_UPTODATE and the passed err is still 0
         */
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
                err = -EIO;

        if (err != 0) {
                pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
                        " err: %d\n", bio, err);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic_inc();
        }

        bio_put(bio);

        if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
                return;

        /* The bio_put() above may have freed the bio, so don't touch it here */
        pr_debug("done[%p] task_lba: %llu err=%d\n",
                 task, task->task_lba, err);

        transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
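
/*
 * Completion fan-in: every bio submitted for the task took a reference in
 * ib_bio_cnt, and only the completion that drops the count to zero calls
 * transport_complete_task(). A non-zero ib_bio_err_cnt from any earlier
 * bio failure marks the whole task as failed.
 */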

static struct se_subsystem_api iblock_template = {
        .name                   = "iblock",
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .allocate_virtdevice    = iblock_allocate_virtdevice,
        .create_virtdevice      = iblock_create_virtdevice,
        .free_device            = iblock_free_device,
        .dpo_emulated           = iblock_emulated_dpo,
        .fua_write_emulated     = iblock_emulated_fua_write,
        .fua_read_emulated      = iblock_emulated_fua_read,
        .write_cache_emulated   = iblock_emulated_write_cache,
        .alloc_task             = iblock_alloc_task,
        .do_task                = iblock_do_task,
        .do_discard             = iblock_do_discard,
        .do_sync_cache          = iblock_emulate_sync_cache,
        .free_task              = iblock_free_task,
        .check_configfs_dev_params = iblock_check_configfs_dev_params,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_rev         = iblock_get_device_rev,
        .get_device_type        = iblock_get_device_type,
        .get_blocks             = iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
        return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
        transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);