/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}
/* fd_attach_hba(): (Part of se_subsystem_api_t template) */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}
static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

	return &fd_dev->dev;
}
static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data-loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write-out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q = bdev_get_queue(inode->i_bdev);
		unsigned long long dev_size;

		dev->dev_attrib.hw_block_size =
			bdev_logical_block_size(inode->i_bdev);
		dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);

		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
		dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
	}

	fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;

	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
		fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}
static void fd_free_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}
static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = FD_DEV(se_dev);
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != cmd->data_length) {
			pr_err("vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)cmd->data_length);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			pr_err("vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}
static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = FD_DEV(se_dev);
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != cmd->data_length) {
		pr_err("vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}
static int fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	} else {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	return 0;
}
static int fd_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(cmd, sgl, sgl_nents);
	} else {
		ret = fd_do_writev(cmd, sgl, sgl_nents);
		/*
		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 &&
		    dev->dev_attrib.emulate_fua_write > 0 &&
		    (cmd->se_cmd_flags & SCF_FUA)) {
			struct fd_dev *fd_dev = FD_DEV(dev);
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end = start + cmd->data_length;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}
	}

	if (ret < 0) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return ret;
	}
	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};
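/*
 * Illustrative example (not part of the original source): the option string
 * parsed by fd_set_configfs_dev_params() below is normally written through
 * the backend device's configfs "control" attribute. The path and file name
 * shown here are only assumptions for a typical target_core_mod setup:
 *
 *   echo "fd_dev_name=/tmp/fileio.img,fd_dev_size=4194304,fd_buffered_io=1" > \
 *       /sys/kernel/config/target/core/fileio_0/mydev/control
 *
 * fd_dev_name= and fd_dev_size= set FBDF_HAS_PATH and FBDF_HAS_SIZE, which
 * fd_configure_device() checks before opening the backing file.
 */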
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
					FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			match_int(args, &arg);
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC");
	return bl;
}
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size, dev->dev_attrib.block_size);
}
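/*
 * Worked example (illustrative numbers only): a 4 GiB backing file
 * (fd_dev_size=4294967296) exported with a 512-byte logical block size
 * yields 4294967296 / 512 = 8388608 logical blocks from fd_get_blocks(),
 * which the target core uses when reporting the device capacity.
 */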
static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
};

static int fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
static struct se_subsystem_api fileio_template = {
	.name = "fileio",
	.inquiry_prod = "FILEIO",
	.inquiry_rev = FD_VERSION,
	.owner = THIS_MODULE,
	.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba = fd_attach_hba,
	.detach_hba = fd_detach_hba,
	.alloc_device = fd_alloc_device,
	.configure_device = fd_configure_device,
	.free_device = fd_free_device,
	.parse_cdb = fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_rev = sbc_get_device_rev,
	.get_device_type = sbc_get_device_type,
	.get_blocks = fd_get_blocks,
};
static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);